Example #1
def test_picamera():
    from donkeycar.parts.camera import PiCamera
    resolution = (120, 160)
    cam = PiCamera(resolution=resolution)
    frame = cam.run()
    # assert shape is as expected: img_array.shape is (height, width, channels)
    assert frame.shape[:2] == resolution[::-1]
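The reversed comparison works because a NumPy image array is indexed (rows, columns), i.e. (height, width), while the camera resolution is given as (width, height). A self-contained check with a dummy array (no camera required; the array below is purely illustrative):

import numpy as np

resolution = (120, 160)   # (width, height), as passed to the camera constructor

# a dummy frame laid out the way NumPy stores images: (height, width, channels)
frame = np.zeros((resolution[1], resolution[0], 3), dtype=np.uint8)

# the first two shape entries are (height, width), i.e. the reversed resolution
assert frame.shape[:2] == resolution[::-1]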
Example #2
File: network.py Project: cfox570/dk
def test_udp_broadcast(ip):

    if ip is None:
        print("udp broadcast test..")
        p = UDPValuePub('camera')
        from donkeycar.parts.camera import PiCamera
        from donkeycar.parts.image import ImgArrToJpg
        cam = PiCamera(160, 120, 3, framerate=4)
        img_conv = ImgArrToJpg()
        time.sleep(1)

        while True:
            cam_img = cam.run()
            jpg = img_conv.run(cam_img)
            print("sending", len(jpg), "bytes")
            p.run(jpg)
            time.sleep(0.5)

    else:
        print("udp listen test..", ip)
        s = UDPValueSub('camera')

        while True:
            res = s.run()
            time.sleep(0.1)
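UDPValuePub/UDPValueSub hide the socket handling and wire format; as a rough illustration of the underlying mechanism only (plain standard-library sockets, arbitrary port, not donkeycar's actual protocol):

import socket

PORT = 37021  # arbitrary port chosen for this sketch

def broadcast_bytes(payload, port=PORT):
    # send a single datagram to the local broadcast address
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.sendto(payload, ('<broadcast>', port))
    s.close()

def receive_bytes(port=PORT, bufsize=65536):
    # block until one datagram arrives and return its payload
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(('', port))
    data, _addr = s.recvfrom(bufsize)
    s.close()
    return data

A small 160x120 JPEG normally fits in one datagram; larger payloads would need chunking, which is one reason to use the library parts as-is.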
Example #3
File: network.py Project: cfox570/dk
def test_tcp_client_server(ip):

    if ip is None:
        p = TCPServeValue("camera")
        from donkeycar.parts.camera import PiCamera
        from donkeycar.parts.image import ImgArrToJpg
        cam = PiCamera(160, 120, 3, framerate=4)
        img_conv = ImgArrToJpg()
        while True:
            cam_img = cam.run()
            jpg = img_conv.run(cam_img)
            p.run(jpg)
            time.sleep(0.1)
    else:
        c = TCPClientValue("camera", ip)
        while True:
            c.run()
            time.sleep(0.01)
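Likewise, TCPServeValue/TCPClientValue encapsulate their own connection handling; the sketch below only shows the general pattern with standard-library sockets (length-prefixed payload, placeholder port), not the library's actual protocol:

import socket

PORT = 8999  # placeholder port for this sketch

def serve_one_value(payload):
    # accept a single client and send it one length-prefixed payload
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('', PORT))
    srv.listen(1)
    conn, _addr = srv.accept()
    conn.sendall(len(payload).to_bytes(4, 'big') + payload)
    conn.close()
    srv.close()

def recv_exact(sock, n):
    # read exactly n bytes (TCP may deliver them in pieces)
    buf = b''
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError('connection closed early')
        buf += chunk
    return buf

def fetch_one_value(server_ip):
    # connect, read the 4-byte length header, then the payload
    c = socket.create_connection((server_ip, PORT))
    size = int.from_bytes(recv_exact(c, 4), 'big')
    data = recv_exact(c, size)
    c.close()
    return data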
Example #4
File: manage.py Project: C425Roy/-
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            seat = np.zeros((1, 2))
            seat[0][0] = 0
            if mode == 'user':

                cam = PiCamera()
                data1 = np.loadtxt(
                    "/home/pi/projects/donkeycar/donkeycar/parts/data.txt")
                data1 = np.reshape(data1, (1, 2))
                #cam.run()
                print(data1[0][0])
                print('x:', data1[0][1])
                area = data1[0][0]
                seat[0][0] = data1[0][1]
                if area < 20000 and area != 0:
                    if seat[0][0] < 1500:
                        user_angle = 0.15
                        user_throttle = -0.75
                    elif seat[0][0] > 1650:
                        user_angle = -0.05
                        user_throttle = -0.75
                    elif seat[0][0] >= 1500 and seat[0][0] <= 1650:
                        user_angle = 0
                        user_throttle = -0.75
                elif area >= 20000 and area <= 30000:
                    if seat[0][0] < 1300:
                        user_angle = 0
                        user_throttle = 0
                    elif seat[0][0] > 1400:
                        user_angle = 0
                        user_throttle = 0
                    elif seat[0][0] >= 1300 and seat[0][0] <= 1400:
                        user_angle = 0
                        user_throttle = 0
                if area > 30000:
                    if seat[0][0] < 1550:
                        user_angle = -0.01
                        user_throttle = 0.5
                    elif seat[0][0] > 1650:
                        user_angle = 0.15
                        user_throttle = 0.5
                    elif seat[0][0] >= 1550 and seat[0][0] <= 1650:
                        user_angle = 0
                        user_throttle = 0.5
                elif area == 0:
                    user_angle = 0
                    user_throttle = 0

                print('user_throttle', user_throttle)
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle if pilot_angle else 0.0, user_throttle

            else:
                return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
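The branching above is easier to audit when written as a pure function of the detected area and horizontal position. The thresholds below are copied from the example; the function name and comments are ours, and the net result matches the original branches:

def follow_decision(area, x):
    # returns (user_angle, user_throttle) using the thresholds from the run() above
    if area == 0:                      # nothing detected: stop
        return 0.0, 0.0
    if area < 20000:                   # small target: throttle -0.75, steer based on x
        if x < 1500:
            return 0.15, -0.75
        if x > 1650:
            return -0.05, -0.75
        return 0.0, -0.75
    if area <= 30000:                  # mid-sized target: hold still
        return 0.0, 0.0
    # area > 30000: large target, throttle 0.5, steer based on x
    if x < 1550:
        return -0.01, 0.5
    if x > 1650:
        return 0.15, 0.5
    return 0.0, 0.5

print(follow_decision(25000, 1350))    # (0.0, 0.0)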
Example #5
def test(cfg):
    V = dk.vehicle.Vehicle()
    inputs = []
    threaded = True

    #cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
    cam = PiCamera(image_w=120, image_h=160, image_d=cfg.IMAGE_DEPTH)
    #cam = PiCamera(image_w=608, image_h=608, image_d=cfg.IMAGE_DEPTH)

    V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
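Example #5 registers the camera with threaded=True. In the Vehicle framework a threaded part exposes update(), which the framework runs on a background thread, and run_threaded(), which the drive loop calls each tick to fetch the latest value. A hardware-free sketch of that contract (the class and its 0.05 s period are made up for illustration):

import threading
import time

class CounterPart:
    # illustrative threaded part: update() runs in the background,
    # run_threaded() just hands back the most recent value
    def __init__(self):
        self.value = 0
        self.running = True

    def update(self):
        # background loop; normally the Vehicle framework starts this thread
        while self.running:
            self.value += 1
            time.sleep(0.05)

    def run_threaded(self):
        # called from the drive loop; must return quickly
        return self.value

    def shutdown(self):
        self.running = False

part = CounterPart()
threading.Thread(target=part.update, daemon=True).start()
time.sleep(0.3)
print('latest value:', part.run_threaded())
part.shutdown()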
Example #6
def go(cfg):
    '''
    '''

    #Initialize car
    V = dk.vehicle.Vehicle()

    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if cfg.CAMERA_TYPE == "PICAM":
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H)
    elif cfg.CAMERA_TYPE == "WEBCAM":
        from donkeycar.parts.camera import Webcam
        cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H)
    elif cfg.CAMERA_TYPE == "CVCAM":
        from donkeycar.parts.cv import CvCam
        cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H)

    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if cfg.CAMERA_TYPE == "CVCAM":
        from donkeycar.parts.cv import CvImageView
        mon = CvImageView()
        V.add(mon, inputs=['cam/image_array'])

    ctr = None
    '''
    ctr = LocalWebController()
    
    V.add(ctr, 
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)
        '''

    inputs = ['cam/image_array']
    types = ['image_array']
    '''
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"])

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")
    elif type(ctr) is JoystickController:
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub        
        ctr.set_tub(tub)
        '''

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
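The if/elif chain on cfg.CAMERA_TYPE is essentially a small factory. The same selection can be written as a dispatch table; the sketch below is a toy version with a faked camera class so it runs anywhere (names are illustrative, not donkeycar API):

class FakeCam:
    # stand-in for PiCamera/Webcam/CvCam so the sketch needs no hardware
    def __init__(self, image_w, image_h):
        self.size = (image_w, image_h)

CAMERA_FACTORIES = {
    "PICAM": FakeCam,
    "WEBCAM": FakeCam,
    "CVCAM": FakeCam,
}

def make_camera(camera_type, image_w, image_h):
    if camera_type not in CAMERA_FACTORIES:
        raise Exception("Unknown camera type: %s" % camera_type)
    return CAMERA_FACTORIES[camera_type](image_w=image_w, image_h=image_h)

print(make_camera("PICAM", 160, 120).size)   # (160, 120)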
Example #7
def drive(cfg):
    V = dk.vehicle.Vehicle()

    clock = Timestamp()
    V.add(clock, outputs=['timestamp'])

    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    # (omitted)
    V.mem['recording'] = True

    # Modify the joystick or web controller so that the following values are
    # stored in the Vehicle framework's memory:
    # .../value  input value from -1.0 to 1.0 → becomes the voltage applied to the DC motor
    # .../status 'move': drive forward/backward, 'free': no power, 'brake': brake to a stop

    # As a dummy, feed in values that drive both motors forward at half speed
    V.mem['user/left/value'] = 0.5
    V.mem['user/left/status'] = 'move'  # 'free' 'brake'
    V.mem['user/right/value'] = 0.5
    V.mem['user/right/status'] = 'move'  # 'free' 'brake'

    # Object that communicates with pigpiod on the target Raspberry Pi.
    # When using multiple motors, pass this same object to each constructor;
    # GPIO pins, however, must not be shared between them.
    pi = pigpio.pi()

    # Manage each DC motor as its own DCMotor instance
    from donkeypart_dc130ra import DCMotor
    left_motor = DCMotor(pi, cfg.LEFT_MOTOR_IN1_GPIO, cfg.LEFT_MOTOR_IN2_GPIO)
    right_motor = DCMotor(pi, cfg.RIGHT_MOTOR_IN1_GPIO,
                          cfg.RIGHT_MOTOR_IN2_GPIO)

    # Register with the Vehicle framework
    V.add(left_motor, inputs=['user/left/value', 'user/left/status'])
    V.add(right_motor, inputs=['user/right/value', 'user/right/status'])

    # The Tub data format also has to be changed accordingly
    inputs = [
        'cam/image_array', 'user/left/value', 'user/left/status',
        'user/right/value', 'user/right/status', 'timestamp'
    ]
    types = ['image_array', 'float', 'str', 'float', 'str', 'str']

    # single tub
    tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
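Above, DCMotor from donkeypart_dc130ra is used as a black box. As a rough, unofficial sketch of what a two-pin H-bridge motor part can look like with pigpio's PWM calls (duty range 0-255; treating 'brake' as driving both inputs high is an assumption that depends on the driver chip):

import pigpio

class SimpleDCMotor:
    # illustrative stand-in, not the real donkeypart_dc130ra.DCMotor:
    # value in [-1.0, 1.0] sets PWM duty on IN1 (one direction) or IN2 (the other)
    def __init__(self, pi, in1_gpio, in2_gpio):
        self.pi = pi
        self.in1 = in1_gpio
        self.in2 = in2_gpio
        for pin in (self.in1, self.in2):
            self.pi.set_mode(pin, pigpio.OUTPUT)
            self.pi.set_PWM_dutycycle(pin, 0)

    def run(self, value, status):
        if status == 'brake':
            self.pi.set_PWM_dutycycle(self.in1, 255)
            self.pi.set_PWM_dutycycle(self.in2, 255)
        elif status == 'free' or value is None:
            self.pi.set_PWM_dutycycle(self.in1, 0)
            self.pi.set_PWM_dutycycle(self.in2, 0)
        else:
            duty = int(min(abs(value), 1.0) * 255)
            if value >= 0:
                self.pi.set_PWM_dutycycle(self.in2, 0)
                self.pi.set_PWM_dutycycle(self.in1, duty)
            else:
                self.pi.set_PWM_dutycycle(self.in1, 0)
                self.pi.set_PWM_dutycycle(self.in2, duty)

    def shutdown(self):
        self.pi.set_PWM_dutycycle(self.in1, 0)
        self.pi.set_PWM_dutycycle(self.in2, 0)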
Example #8
File: network.py Project: cfox570/dk
def test_mqtt_pub_sub(ip):

    if ip is None:
        print("publishing test..")
        p = MQTTValuePub('donkey/camera')
        from donkeycar.parts.camera import PiCamera
        from donkeycar.parts.image import ImgArrToJpg
        cam = PiCamera(160, 120, 3, framerate=4)
        img_conv = ImgArrToJpg()
        while True:
            cam_img = cam.run()
            jpg = img_conv.run(cam_img)
            p.run(jpg)
            time.sleep(0.1)

    else:
        print("subscribing test..")
        s = MQTTValueSub('donkey/camera')

        while True:
            res = s.run()
            print("got:", res)
            time.sleep(0.1)
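MQTTValuePub/MQTTValueSub wrap an MQTT client; the same publish/subscribe pattern can be sketched directly with paho-mqtt (assuming the 1.x-style client API; broker address and rate below are placeholders):

import time
import paho.mqtt.client as mqtt   # pip install paho-mqtt

BROKER = "localhost"              # placeholder broker address
TOPIC = "donkey/camera"

def publish_forever(get_payload, rate_hz=10):
    # publish whatever get_payload() returns, rate_hz times per second
    client = mqtt.Client()
    client.connect(BROKER)
    client.loop_start()
    while True:
        client.publish(TOPIC, get_payload())
        time.sleep(1.0 / rate_hz)

def subscribe_and_print():
    # print the size of every message received on TOPIC
    def on_message(client, userdata, msg):
        print("got", len(msg.payload), "bytes on", msg.topic)

    client = mqtt.Client()
    client.on_message = on_message
    client.connect(BROKER)
    client.subscribe(TOPIC)
    client.loop_forever()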
Example #9
def drive(cfg, model_path=None):
    vehicle = dk.vehicle.Vehicle()

    clock = Timestamp()
    vehicle.add(clock, outputs=['timestamp'])

    camera = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    vehicle.add(camera, outputs=['cam/image_array'], threaded=True)

    controller = PS3JoystickController(
        throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
        steering_scale=cfg.JOYSTICK_STEERING_SCALE,
        auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    vehicle.add(
        controller,
        outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
        threaded=True)

    steering = PWMSteering(controller=PCA9685(cfg.STEERING_CHANNEL),
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)
    vehicle.add(steering, inputs=['user/angle'])

    throttle = PWMThrottle(controller=PCA9685(cfg.THROTTLE_CHANNEL),
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)
    vehicle.add(throttle, inputs=['user/throttle'])

    lidar = LidarScan()
    vehicle.add(lidar, outputs=['lidar/scan'], threaded=True)

    position = LidarPosition()
    vehicle.add(position,
                inputs=['lidar/scan'],
                outputs=['lidar/position', 'lidar/borders'],
                threaded=True)

    mqtt_client = MQTTClient(cfg)
    vehicle.add(mqtt_client,
                inputs=[
                    'user/mode', 'user/angle', 'user/throttle',
                    'lidar/position', 'lidar/borders'
                ],
                outputs=['remote/mode'],
                threaded=True)

    vehicle.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
Example #10
def drive(cfg, model_path=None, model_type=None):
    """
    Construct a minimal robotic vehicle from many parts. Here, we use a
    single camera, web or joystick controller, autopilot and tubwriter.

    Each part runs as a job in the Vehicle loop, calling either its run or
    run_threaded method depending on the constructor flag `threaded`. All
    parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely
    manner. Parts may have named outputs and inputs. The framework handles
    passing named outputs to parts requesting the same named input.
    """

    car = dk.vehicle.Vehicle()
    # add camera
    inputs = []
    if cfg.DONKEY_GYM:
        from donkeycar.parts.dgym import DonkeyGymEnv
        cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH,
                           host=cfg.SIM_HOST,
                           env_name=cfg.DONKEY_GYM_ENV_NAME,
                           conf=cfg.GYM_CONF,
                           delay=cfg.SIM_ARTIFICIAL_LATENCY)
        inputs = ['angle', 'throttle', 'brake']
    elif cfg.CAMERA_TYPE == "PICAM":
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(image_w=cfg.IMAGE_W,
                       image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH,
                       framerate=cfg.CAMERA_FRAMERATE,
                       vflip=cfg.CAMERA_VFLIP,
                       hflip=cfg.CAMERA_HFLIP)
    elif cfg.CAMERA_TYPE == "WEBCAM":
        from donkeycar.parts.camera import Webcam
        cam = Webcam(image_w=cfg.IMAGE_W,
                     image_h=cfg.IMAGE_H,
                     image_d=cfg.IMAGE_DEPTH)
    elif cfg.CAMERA_TYPE == "CVCAM":
        from donkeycar.parts.cv import CvCam
        cam = CvCam(image_w=cfg.IMAGE_W,
                    image_h=cfg.IMAGE_H,
                    image_d=cfg.IMAGE_DEPTH)
    elif cfg.CAMERA_TYPE == "CSIC":
        from donkeycar.parts.camera import CSICamera
        cam = CSICamera(image_w=cfg.IMAGE_W,
                        image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH,
                        framerate=cfg.CAMERA_FRAMERATE,
                        gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
    elif cfg.CAMERA_TYPE == "V4L":
        from donkeycar.parts.camera import V4LCamera
        cam = V4LCamera(image_w=cfg.IMAGE_W,
                        image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH,
                        framerate=cfg.CAMERA_FRAMERATE)
    elif cfg.CAMERA_TYPE == "MOCK":
        from donkeycar.parts.camera import MockCamera
        cam = MockCamera(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
    elif cfg.CAMERA_TYPE == "IMAGE_LIST":
        from donkeycar.parts.camera import ImageListCamera
        cam = ImageListCamera(path_mask=cfg.PATH_MASK)
    else:
        raise (Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))

    car.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=True)

    # add lidar
    if cfg.USE_LIDAR:
        if cfg.LIDAR_TYPE == 'RP':
            print("adding RP lidar part")
            lidar = RPLidar(lower_limit=cfg.LIDAR_LOWER_LIMIT,
                            upper_limit=cfg.LIDAR_UPPER_LIMIT)
            car.add(lidar,
                    inputs=[],
                    outputs=['lidar/dist_array'],
                    threaded=True)
        if cfg.LIDAR_TYPE == 'YD':
            print("YD Lidar not yet supported")

    # add controller
    if cfg.USE_RC:
        rc_steering = RCReceiver(cfg.STEERING_RC_GPIO, invert=True)
        rc_throttle = RCReceiver(cfg.THROTTLE_RC_GPIO)
        rc_wiper = RCReceiver(cfg.DATA_WIPER_RC_GPIO, jitter=0.05, no_action=0)
        car.add(rc_steering, outputs=['user/angle', 'user/angle_on'])
        car.add(rc_throttle, outputs=['user/throttle', 'user/throttle_on'])
        car.add(rc_wiper, outputs=['user/wiper', 'user/wiper_on'])
        car.add(RCHelper(), outputs=['user/mode'])
        ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT,
                                 mode=cfg.WEB_INIT_MODE)
        car.add(ctr,
                inputs=['cam/image_array'],
                outputs=['recording'],
                threaded=True)

    else:
        if cfg.USE_JOYSTICK_AS_DEFAULT:
            from donkeycar.parts.controller import get_js_controller
            ctr = get_js_controller(cfg)
            if cfg.USE_NETWORKED_JS:
                from donkeycar.parts.controller import JoyStickSub
                netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
                car.add(netwkJs, threaded=True)
                ctr.js = netwkJs
        else:
            ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT,
                                     mode=cfg.WEB_INIT_MODE)
        car.add(
            ctr,
            inputs=['cam/image_array'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)

    # pilot condition to determine if user or ai are driving
    car.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    # adding the auto-pilot
    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    if model_path:
        kl = dk.utils.get_model_by_type(model_type, cfg)
        kl.load(model_path=model_path)
        if cfg.USE_LIDAR:
            inputs = ['cam/image_array', 'lidar/dist_array']
        else:
            inputs = ['cam/image_array']
        outputs = ['pilot/angle', 'pilot/throttle']
        car.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    # Choose what inputs should change the car.
    car.add(DriveMode(cfg=cfg),
            inputs=[
                'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
                'pilot/throttle'
            ],
            outputs=['angle', 'throttle'])

    # Drive train setup
    if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
        pass
    else:
        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        car.add(steering, inputs=['angle'])
        car.add(throttle, inputs=['throttle'])

    # add tub to save data
    if cfg.USE_LIDAR:
        inputs = [
            'cam/image_array', 'lidar/dist_array', 'user/angle',
            'user/throttle', 'user/mode'
        ]
        types = ['image_array', 'nparray', 'float', 'float', 'str']
    else:
        inputs = [
            'cam/image_array', 'user/angle', 'user/throttle', 'user/mode'
        ]
        types = ['image_array', 'float', 'float', 'str']

    # do we want to store new records into own dir or append to existing
    tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
        cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
    tub_writer = TubWriter(base_path=tub_path, inputs=inputs, types=types)
    car.add(tub_writer,
            inputs=inputs,
            outputs=["tub/num_records"],
            run_condition='recording')
    if not model_path and cfg.USE_RC:
        tub_wiper = TubWiper(tub_writer.tub, num_records=cfg.DRIVE_LOOP_HZ)
        car.add(tub_wiper, inputs=['user/wiper_on'])
    # start the car
    car.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
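The docstring at the top of this example describes how the Vehicle loop shuttles named values between parts. A toy loop (not donkeycar code) showing just that mechanism: a shared memory keyed by channel name, where each part is fed the channels it asked for and its return values are stored under its output names:

memory = {'a': 1}

class Doubler:
    # trivial part: one named input in, one named output out
    def run(self, x):
        return x * 2

parts = [
    # (part instance, input channel names, output channel names)
    (Doubler(), ['a'], ['b']),
    (Doubler(), ['b'], ['c']),
]

for _ in range(3):                                  # a few loop iterations
    for part, inputs, outputs in parts:
        args = [memory.get(name) for name in inputs]
        result = part.run(*args)
        if not isinstance(result, tuple):
            result = (result,)
        for name, value in zip(outputs, result):
            memory[name] = value

print(memory)   # {'a': 1, 'b': 2, 'c': 4}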
Example #11
def drive(cfg, args):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    #Initialize car
    V = dk.vehicle.Vehicle()

    #Camera
    if cfg.DONKEY_GYM:
        from donkeycar.parts.dgym import DonkeyGymEnv
        cfg.GYM_CONF['racer_name'] = args['--name']
        cfg.GYM_CONF['car_name'] = args['--name']
        cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH,
                           host=cfg.SIM_HOST,
                           env_name=cfg.DONKEY_GYM_ENV_NAME,
                           conf=cfg.GYM_CONF,
                           delay=cfg.SIM_ARTIFICIAL_LATENCY)
        inputs = ['steering', 'throttle']
    else:
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(image_w=cfg.IMAGE_W,
                       image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH)
        inputs = []

    V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=True)

    #Controller
    V.add(LineFollower(),
          inputs=['cam/image_array'],
          outputs=['steering', 'throttle', 'recording'])

    #Drive train setup
    if not cfg.DONKEY_GYM:
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               steering_zero_pulse=cfg.STEERING_STOPPED_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               throttle_zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['steering'])
        V.add(throttle, inputs=['throttle'])

    #add tub to save data

    inputs = ['cam/image_array', 'steering', 'throttle']

    types = ['image_array', 'float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition="recording")

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
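LineFollower is used above as a black box with inputs ['cam/image_array'] and outputs ['steering', 'throttle', 'recording']. A deliberately simple stand-in with the same shape (pure NumPy, made-up logic that steers toward the horizontal centroid of bright pixels) shows what such a part has to provide:

import numpy as np

class CentroidFollower:
    # illustrative replacement for a LineFollower-style part, not donkeycar code
    def __init__(self, throttle=0.2):
        self.throttle = throttle

    def run(self, img_arr):
        if img_arr is None:
            return 0.0, 0.0, False           # steering, throttle, recording
        gray = img_arr.mean(axis=2)           # (H, W)
        mask = gray > gray.mean()
        if not mask.any():
            return 0.0, 0.0, False
        xs = np.nonzero(mask)[1]              # column indices of bright pixels
        center = xs.mean() / gray.shape[1]    # 0..1 across the image width
        steering = float(np.clip((center - 0.5) * 2.0, -1.0, 1.0))
        return steering, self.throttle, True

# quick check with a dummy frame: bright region on the right gives steering > 0
frame = np.zeros((120, 160, 3), dtype=np.uint8)
frame[:, 120:, :] = 255
print(CentroidFollower().run(frame))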
Example #12
def drive(cfg,
          model_path=None,
          use_joystick=False,
          model_type=None,
          camera_type='single'):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    if model_type is None:
        if cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = "categorical"

    if model_type == "streamline":
        camera_type = "streamline"

    #Initialize car
    V = dk.vehicle.Vehicle()

    if camera_type == "stereo":

        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam

            camA = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=1)

        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam

            camA = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        def stereo_pair(image_a, image_b):
            '''
            This will take the two images and combine them into a single image
            One in red, the other in green, and diff in blue channel.
            '''
            if image_a is not None and image_b is not None:
                width, height, _ = image_a.shape
                grey_a = dk.utils.rgb2gray(image_a)
                grey_b = dk.utils.rgb2gray(image_b)
                grey_c = grey_a - grey_b

                stereo_image = np.zeros([width, height, 3],
                                        dtype=np.dtype('B'))
                stereo_image[..., 0] = np.reshape(grey_a, (width, height))
                stereo_image[..., 1] = np.reshape(grey_b, (width, height))
                stereo_image[..., 2] = np.reshape(grey_c, (width, height))
            else:
                stereo_image = []

            return np.array(stereo_image)

        image_stereo_pair_part = Lambda(stereo_pair)
        V.add(image_stereo_pair_part,
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])

    #elif camera_type == "streamline":
    #    print("using grayscale pi cam")
    #    assert(cfg.CAMERA_TYPE == "PICAM")
    #    from donkeycar.parts.camera import PiCamera
    #    cam = PiCamera(image_w=cfg.STREAMLINE_IMAGE_W, image_h=cfg.STREAMLINE_IMAGE_H, image_d=1)

    else:

        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        if cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W,
                           image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W,
                        image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        else:
            raise (Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        from donkeycar.parts.controller import PS3JoystickController, PS4JoystickController

        #cont_class = PS3JoystickController
        cont_class = MyJoystickController

        if cfg.CONTROLLER_TYPE == "ps4":
            cont_class = PS4JoystickController

        ctr = cont_class(throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                         steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                         auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)

        if cfg.USE_NETWORKED_JS:
            from donkeycar.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs

    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    def led_cond(mode, recording, recording_alert, behavior_state,
                 reloaded_model):
        #returns a blink rate. 0 for off. -1 for on. positive for rate.

        if reloaded_model:
            led.set_rgb(cfg.MODEL_RELOADED_LED_R, cfg.MODEL_RELOADED_LED_G,
                        cfg.MODEL_RELOADED_LED_B)
            return 0.1
        else:
            led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        if recording_alert:
            led.set_rgb(*recording_alert)
            return cfg.REC_COUNT_ALERT_BLINK_RATE
        else:
            led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        if behavior_state is not None and model_type == 'behavior':
            r, g, b = cfg.BEHAVIOR_LED_COLORS[behavior_state]
            led.set_rgb(r, g, b)
            return -1  #solid on

        if recording:
            return -1  #solid on
        elif mode == 'user':
            return 1
        elif mode == 'local_angle':
            return 0.5
        elif mode == 'local':
            return 0.1
        return 0

    if cfg.HAVE_RGB_LED:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B,
                      cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        led_cond_part = Lambda(led_cond)
        V.add(led_cond_part,
              inputs=[
                  'user/mode', 'recording', "records/alert", 'behavior/state',
                  'reloaded/model'
              ],
              outputs=['led/blink_rate'])

        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    def record_tracker(num_records):

        if num_records is None:
            return 0

        if record_tracker.last_num_rec_print != num_records or record_tracker.force_alert:
            record_tracker.last_num_rec_print = num_records

            if num_records % 10 == 0:
                print("recorded", num_records, "records")

            if num_records % cfg.REC_COUNT_ALERT == 0 or record_tracker.force_alert:
                record_tracker.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                record_tracker.force_alert = 0

        if record_tracker.dur_alert > 0:
            record_tracker.dur_alert -= 1

        if record_tracker.dur_alert != 0:
            return get_record_alert_color(num_records)

        return 0

    record_tracker.last_num_rec_print = 0
    record_tracker.dur_alert = 0
    record_tracker.force_alert = 0
    rec_tracker_part = Lambda(record_tracker)
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_count_status():
            record_tracker.last_num_rec_print = 0
            record_tracker.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_count_status)

    #IMU
    if cfg.HAVE_IMU:
        imu = Mpu6050()
        V.add(imu,
              outputs=[
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
                  'imu/gyr_y', 'imu/gyr_z'
              ],
              threaded=True)

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh,
              outputs=[
                  'behavior/state', 'behavior/label',
                  "behavior/one_hot_state_array"
              ])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = ['cam/image_array', "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs = [
            'cam/image_array', 'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'
        ]
    else:
        inputs = ['cam/image_array']

    def load_model(kl, model_path):
        start = time.time()
        try:
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except:
            print('problems loading model', model_path)

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except:
            print('problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        import keras
        with open(json_fnm, 'r') as handle:
            contents = handle.read()
            kl.model = keras.models.model_from_json(contents)
        print('finished loading json in %s sec.' % (str(time.time() - start)))

    if model_path:
        #When we have a model, first create an appropriate Keras part
        #if model_type == "streamline":
        #kl = KerasStreamline()
        #else:
        kl = dk.utils.get_model_by_type(model_type, cfg)

        if '.h5' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                print(filename, "was changed!")
                load_model(kl, filename)

            fw_part = FileWatcher(model_path,
                                  reload_model,
                                  wait_for_write_stop=10.0)
            V.add(fw_part, outputs=['reloaded/model'])

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                print(filename, "was changed!")
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            fw_part = FileWatcher(model_path,
                                  reload_weights,
                                  wait_for_write_stop=1.0)
            V.add(fw_part, outputs=['reloaded/model'])

        else:
            #previous default behavior
            load_model(kl, model_path)

        V.add(kl,
              inputs=inputs,
              outputs=['pilot/angle', 'pilot/throttle'],
              run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #Drive train setup

    if cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  # the 'channel' value is really a GPIO pin number
        #PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                          cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(motor, inputs=["throttle"])

    #add tub to save data

    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']

    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += [
            'behavior/state', 'behavior/label', "behavior/one_hot_state_array"
        ]
        types += ['int', 'str', 'vector']

    if cfg.HAVE_IMU:
        inputs += [
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
            'imu/gyr_z'
        ]

        types += ['float', 'float', 'float', 'float', 'float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        ctr.set_tub(tub)

    #run the vehicle for 20 seconds
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
def train_drive_reinforcement(cfg, args, script_mode):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    model_path = args['--model']
    use_joystick = args['--js']
    meta = args['--meta']
    tub_in = args['--tub']
    model_type = args['--type']
    camera_type = args['--camera']

    vae_path = args['--vae']

    auto_mode = 0
    if args['--auto']:
        auto_mode = 1

    env_type = 'simulator'
    if args['--env']:
        env_type = args['--env']

    print('VAE_PATH: %s' % vae_path)

    global r
    global ctr
    global cam

    if cfg.DONKEY_GYM:
        #the simulator will use cuda and then we usually run out of resources
        #if we also try to use cuda. so disable for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if camera_type == "stereo":

        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam

            camA = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=1)

        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam

            camA = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair

        V.add(StereoPair(),
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])
    elif cfg.CAMERA_TYPE == "D435":
        from donkeycar.parts.realsense435i import RealSense435i
        cam = RealSense435i(enable_rgb=cfg.REALSENSE_D435_RGB,
                            enable_depth=cfg.REALSENSE_D435_DEPTH,
                            enable_imu=cfg.REALSENSE_D435_IMU,
                            device_id=cfg.REALSENSE_D435_ID)
        V.add(cam,
              inputs=[],
              outputs=[
                  'cam/image_array', 'cam/depth_array', 'imu/acl_x',
                  'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
                  'imu/gyr_z'
              ],
              threaded=True)

    else:
        if False and cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv

        inputs = []
        threaded = True
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH,
                               host=cfg.SIM_HOST,
                               env_name=cfg.DONKEY_GYM_ENV_NAME,
                               conf=cfg.GYM_CONF,
                               delay=cfg.SIM_ARTIFICIAL_LATENCY)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W,
                           image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           framerate=cfg.CAMERA_FRAMERATE,
                           vflip=cfg.CAMERA_VFLIP,
                           hflip=cfg.CAMERA_HFLIP)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W,
                        image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W,
                            image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE,
                            gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W,
                            image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W,
                             image_h=cfg.IMAGE_H,
                             image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "IMAGE_LIST":
            from donkeycar.parts.camera import ImageListCamera
            cam = ImageListCamera(path_mask=cfg.PATH_MASK)
        else:
            raise (Exception("Unknown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam,
              inputs=inputs,
              outputs=['cam/image_array'],
              threaded=threaded)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "MM1":
            from donkeycar.parts.robohat import RoboHATController
            ctr = RoboHATController(cfg)
        elif "custom" == cfg.CONTROLLER_TYPE:
            #
            # custom controller created with `donkey createjs` command
            #
            from my_joystick import MyJoystickController
            ctr = MyJoystickController(
                throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
                throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
            ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
        else:
            from donkeycar.parts.controller import get_js_controller

            ctr = get_js_controller(cfg)

            if cfg.USE_NETWORKED_JS:
                from donkeycar.parts.controller import JoyStickSub
                netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
                V.add(netwkJs, threaded=True)
                ctr.js = netwkJs

        V.add(
            ctr,
            inputs=['cam/image_array'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)

    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT,
                                 mode=cfg.WEB_INIT_MODE)

        V.add(
            ctr,
            inputs=['cam/image_array', 'tub/num_records'],
            outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
            threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state,
                model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R,
                            self.cfg.MODEL_RELOADED_LED_G,
                            self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1  #solid on

            if recording:
                return -1  #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B,
                      cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg),
              inputs=[
                  'user/mode', 'recording', "records/alert", 'behavior/state',
                  'modelfile/modified', "pilot/loc"
              ],
              outputs=['led/blink_rate'])

        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_count_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_count_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import IMU
        imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
        V.add(imu,
              outputs=[
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
                  'imu/gyr_y', 'imu/gyr_z'
              ],
              threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
              inputs=['cam/image_array'],
              outputs=[inf_input],
              run_condition='run_pilot')

    # Use the FPV preview, which will show the cropped image output, or the full frame.
    if False and cfg.USE_FPV:
        V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh,
              outputs=[
                  'behavior/state', 'behavior/label',
                  "behavior/one_hot_state_array"
              ])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs = [
            inf_input, 'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
            'imu/gyr_y', 'imu/gyr_z'
        ]
    else:
        inputs = [inf_input]

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle, user_throttle

            else:
                #return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT
                return user_angle, user_throttle

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE,
                          cfg.AI_LAUNCH_KEEP_ENABLED)

    V.add(aiLauncher, inputs=['user/mode', 'throttle'], outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON,
                                    aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when in ai mode, otherwise respect the user-mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(),
              inputs=['user/mode', 'recording'],
              outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
        pass
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  #really pin
        #PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                          cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(motor, inputs=["throttle"])

    elif cfg.DRIVE_TRAIN_TYPE == "MM1":
        from donkeycar.parts.robohat import RoboHATDriver
        V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
        from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
        steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN,
                                         freq=cfg.STEERING_PWM_FREQ,
                                         inverted=cfg.STEERING_PWM_INVERTED)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN,
                                         freq=cfg.THROTTLE_PWM_FREQ,
                                         inverted=cfg.THROTTLE_PWM_INVERTED)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    # OLED setup
    if cfg.USE_SSD1306_128_32:
        from donkeycar.parts.oled import OLEDPart
        auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
        oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM,
                             auto_record_on_throttle=auto_record_on_throttle)
        V.add(oled_part,
              inputs=['recording', 'tub/num_records', 'user/mode'],
              outputs=[],
              threaded=True)

    #add tub to save data

    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']

    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += [
            'behavior/state', 'behavior/label', "behavior/one_hot_state_array"
        ]
        types += ['int', 'str', 'vector']

    if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
        inputs += ['cam/depth_array']
        types += ['gray16_array']

    if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
        inputs += [
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
            'imu/gyr_z'
        ]

        types += ['float', 'float', 'float', 'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        if cfg.DONKEY_GYM:
            print("You can now go to http://localhost:%d to drive your car." %
                  cfg.WEB_CONTROL_PORT)
        else:
            print(
                "You can now go to <your hostname.local>:%d to drive your car."
                % cfg.WEB_CONTROL_PORT)
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        ctr.set_tub(tub)

        if cfg.BUTTON_PRESS_NEW_TUB:

            def new_tub_dir():
                V.parts.pop()
                tub = th.new_tub_writer(inputs=inputs,
                                        types=types,
                                        user_meta=meta)
                V.add(tub,
                      inputs=inputs,
                      outputs=["tub/num_records"],
                      run_condition='recording')
                ctr.set_tub(tub)

            ctr.set_button_down_trigger('cross', new_tub_dir)
        ctr.print_controls()

    if script_mode == 'train':
        _thread.start_new_thread(
            observe_and_learn,
            (cfg, model_path, vae_path, auto_mode, env_type))

        _thread.start_new_thread(save_reconstruction, (cfg, ))
    elif script_mode == 'drive':
        _thread.start_new_thread(drive_model,
                                 (cfg, model_path, vae_path, env_type))
    elif script_mode == 'optimize':
        _thread.start_new_thread(
            optimize_model, (cfg, model_path, vae_path, auto_mode, env_type))
    elif script_mode == 'train_vae':
        print('collecting data for vae training...')

    #run the vehicle loop (until cfg.MAX_LOOPS iterations, if set)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
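
A note on the DC_TWO_WHEEL branch above: TwoWheelSteeringThrottle converts the single angle/throttle pair into per-wheel speeds before the two Mini_HBridge_DC_Motor_PWM parts see them. The donkeycar implementation is not shown in this example, so the following is only a minimal sketch of how such a differential mix can work; the class name and the clamping policy are illustrative assumptions, not the library's code.

# Minimal sketch of a differential-steering mix in the spirit of
# TwoWheelSteeringThrottle (illustrative only, not the donkeycar source).
class TwoWheelMixSketch:
    def run(self, throttle, angle):
        # angle in [-1, 1]: positive slows one wheel, negative the other.
        if angle > 0:
            left, right = throttle, throttle * (1.0 - angle)
        else:
            left, right = throttle * (1.0 + angle), throttle
        clamp = lambda v: max(-1.0, min(1.0, v))
        return clamp(left), clamp(right)

# Example: TwoWheelMixSketch().run(1.0, 0.5) -> (1.0, 0.5)
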
Example #14
0
def drive(cfg, model_path=None, use_joystick=False, use_chaos=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    """

    V = dk.vehicle.Vehicle()

    clock = Timestamp()
    V.add(clock, outputs='timestamp')

    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(use_chaos=use_chaos)

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    class Timer:
        def __init__(self, loops_timed):
            self.start_time = time.time()
            self.loop_counter = 0
            self.loops_timed = loops_timed

        def run(self):
            self.loop_counter += 1
            if self.loop_counter == self.loops_timed:
                seconds = time.time() - self.start_time
                print("{} loops takes {} seconds".format(
                    self.loops_timed, seconds))
                self.loop_counter = 0
                self.start_time = time.time()

    timer = Timer(50)
    V.add(timer)

    #Part to save multiple image arrays from camera
    class ImageArrays:
        def __init__(self, steps):
            tmp = np.zeros((120, 160, 3))
            self.steps = steps
            self.storage = [tmp for i in range(self.steps * 2 + 1)]
            self.active_images = [tmp for i in range(3)]

        def run(self, image):
            self.storage.pop(0)
            self.storage.append(image)
            for i in range(3):
                self.active_images[i] = self.storage[i * self.steps]

            return np.array(self.active_images)

    image_arrays = ImageArrays(10)
    V.add(image_arrays,
          inputs=['cam/image_array'],
          outputs=['cam/image_arrays'])

    # Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_arrays'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'timestamp'
    ]
    types = ['image_array', 'float', 'float', 'str', 'str']

    #multiple tubs
    #th = TubHandler(path=cfg.DATA_PATH)
    #tub = th.new_tub_writer(inputs=inputs, types=types)

    # single tub
    tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
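
The docstring of drive() above summarizes the part contract the Vehicle loop relies on: each part exposes run() (or update()/run_threaded() when added with threaded=True) plus an optional shutdown(), and parts are wired purely by the names passed to V.add(). A minimal sketch of such a part, with hypothetical names:

import time

class LoopHzSketch:
    """Illustrative non-threaded part: reports the current loop rate."""
    def __init__(self):
        self.last = time.time()

    def run(self):
        # Called once per Vehicle loop iteration.
        now = time.time()
        hz = 1.0 / max(now - self.last, 1e-6)
        self.last = now
        return hz

    def shutdown(self):
        # Called once when the Vehicle stops.
        pass

# Hypothetical wiring: V.add(LoopHzSketch(), outputs=['loop/hz'])
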
Example #15
0
def drive(cfg,
          model_path=None,
          use_joystick=False,
          model_type=None,
          camera_type='single',
          meta=[]):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''
    #zd d
    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    #zd d
    #1 ********************************************* camera ********************************************
    inputs = []
    threaded = True
    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if cfg.CAMERA_TYPE == "PICAM":
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(image_w=cfg.IMAGE_W,
                       image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH)
    #zd d
    else:
        raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

    V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    #2 ********************************************* controller ********************************************
    #This web controller will create a web server that is capable
    #of managing steering, throttle, and modes, and more.
    ctr = LocalWebController()

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        # not using the circle button here; hijack it to force a record-count indication
        def show_record_count_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_count_status)

    V.add(
        ctr,
        inputs=['cam/image_array'],
        #outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
        outputs=[
            'user/angle', 'user/throttle', 'user/mode', 'recording',
            'change_model', 'user/arrival'
        ],
        #outputs=['user/angle', 'user/throttle', 'user/mode', 'recording', 'change_model'], #zd
        threaded=True)

    # ********************************************* communication ********************************************
    class communication_class:
        def __init__(self):
            self.recv = ''
            self.arrived = 0
            # ser = serial.Serial("/dev/ttyUSB0", 9600)
            self.serial = serial.Serial('/dev/ttyUSB0', 9600, timeout=3600)
            # serial = serial.Serial('COM9', 9600, timeout=3600)
            if self.serial.isOpen():
                print("open success")
            else:
                print("open failed")

            data_judge = '0'
            while data_judge != b'\r\nOK\r\n':
                send_data = 'AT'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nOK\r\n':
                send_data = 'AT+CMEE=1'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nQuectel\r\n\r\nOK\r\n':
                send_data = 'AT+CGMI'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nBC95HB-02-STD_900\r\n\r\nOK\r\n':
                send_data = 'AT+CGMM'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nSECURITY,V100R100C10B657SP3\r\n\r\nPROTOCOL,V100R100C10B657SP3\r\n\r\nAPPLICATION,V100R100C10B657SP3\r\n\r\nSEC_UPDATER,V100R100C10B657SP3\r\n\r\nAPP_UPDATER,V100R100C10B657SP3\r\n\r\nRADIO,BC95HB-02-STD_900\r\n\r\nOK\r\n':
                send_data = 'AT+CGMR'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+NBAND:8\r\n\r\nOK\r\n':
                send_data = 'AT+NBAND?'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+NCONFIG:AUTOCONNECT,TRUE\r\n+NCONFIG:CR_0354_0338_SCRAMBLING,TRUE\r\n+NCONFIG:CR_0859_SI_AVOID,TRUE\r\n+NCONFIG:COMBINE_ATTACH,FALSE\r\n+NCONFIG:CELL_RESELECTION,FALSE\r\n+NCONFIG:ENABLE_BIP,FALSE\r\n\r\nOK\r\n':
                send_data = 'AT+NCONFIG?'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+CGSN:869405035846048\r\n\r\nOK\r\n':
                send_data = 'AT+CGSN=1'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nOK\r\n':
                send_data = 'AT+CFUN=1'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n460043193006443\r\n\r\nOK\r\n':
                send_data = 'AT+CIMI'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nOK\r\n':
                send_data = 'AT+CGATT=0'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+CGATT:0\r\n\r\nOK\r\n':
                send_data = 'AT+CGATT?'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\nOK\r\n':
                send_data = 'AT+CGATT=1'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+CGATT:1\r\n\r\nOK\r\n':
                send_data = 'AT+CGATT?'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = b'\r\n+CSQ:99,99\r\n\r\nOK\r\n'
            while data_judge == b'\r\n+CSQ:99,99\r\n\r\nOK\r\n':
                send_data = 'AT+CSQ'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+COPS:0,2,"46000"\r\n\r\nOK\r\n':
                send_data = 'AT+COPS?'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

            data_judge = '0'
            while data_judge != b'\r\n+CEREG:0,1\r\n\r\nOK\r\n':
                send_data = 'AT+CEREG?'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)
            print('nb-iot initial ok!')

        def update(self):
            while True:
                try:
                    data_judge = '0'
                    # sleep(1)
                    while data_judge != b'\r\n0\r\n\r\nOK\r\n' and data_judge != b'\r\n+CME ERROR: 4\r\n':
                        send_data = 'AT+NSOCR=DGRAM,17,25535,1'
                        print(send_data)
                        send_data = send_data + '\r\n'
                        self.serial.write(send_data.encode())
                        data = self.serial.read(1)
                        sleep(0.5)
                        data_judge = data + self.serial.read(
                            self.serial.inWaiting())
                        print(data_judge)

                    # line = line.splitlines(True)
                    legal = 'OK'
                    data = {'order': 'ask'}
                    data = json.dumps(data)
                    l = len(data)
                    judge = '0'
                    while judge != legal:
                        send_data = "AT+NSOST=0,139.199.105.136,25535," + str(
                            len(data)) + "," + (binascii.b2a_hex(
                                str(data).encode('utf-8'))).decode()
                        print(send_data)
                        send_data = send_data + '\r\n'
                        self.serial.write(send_data.encode())
                        data = self.serial.read(1)
                        # line = ser.read(ser.inWaiting())
                        # line = line.decode()
                        sleep(2)
                        # line = line.splitlines(True)
                        data = data + self.serial.read(self.serial.inWaiting())
                        data = str(data).split("\\r\\n")
                        print(data)
                        judge = data[3]

                    if len(data) > 5:
                        print("进入判断")
                        data = data[5]
                        l = data[10:len(data)]

                        print(l)
                        data_judge = '0'
                        while data_judge != b'\r\n\r\nOK\r\n':
                            print("发送请求")
                            send_data = 'AT+NSORF=0,' + l + "\r\n"
                            print(send_data)
                            # send_data = send_data + '\r\n'
                            self.serial.write(send_data.encode())
                            data = self.serial.read(1)
                            sleep(1)
                            data = data + self.serial.read(
                                self.serial.inWaiting())
                            # print(data.decode('utf-8'))
                            data_judge = data[len(data) - 8:len(data)]
                            data = str(data).split(",")
                            # print((bytearray.fromhex(data[4])).decode('utf-8'))
                            data = (bytearray.fromhex(data[4])).decode('utf-8')
                            data = json.loads(data)
                            self.recv = data
                            print("receive:")
                            print(data)
                    #!!! change car state when arrive
                    # if has_arrived=='arrive':
                    #     print("!!!!!!car_state change to c(waiting to get)")
                    #     judge = '0'
                    #     data = {'order': 'car_state'}
                    #     data = json.dumps(data)
                    #     l = len(data)
                    #     legal = "\r\n0," + str(len(data)) + "\r\n\r\nOK\r\n"
                    #
                    #     while judge != legal.encode('utf-8'):
                    #         send_data = "AT+NSOST=0,139.199.105.136,25535," + str(len(data)) + "," + (
                    #             binascii.b2a_hex(str(data).encode('utf-8'))).decode()
                    #         print(send_data)
                    #         send_data = send_data + '\r\n'
                    #         self.serial.write(send_data.encode())
                    #         data = self.serial.read(1)
                    #         sleep(2)
                    #         data = data + self.serial.read(self.serial.inWaiting())
                    #         #print(data)
                    #         judge = data[0:14]

                    data_judge = '0'
                    # sleep(1)
                    while data_judge != b'\r\nOK\r\n':
                        # line = ser.read(ser.inWaiting())
                        # line = line.decode()

                        send_data = 'AT+NSOCL=0'
                        print(send_data)
                        send_data = send_data + '\r\n'
                        self.serial.write(send_data.encode())
                        data = self.serial.read(1)
                        sleep(0.5)
                        data_judge = data + self.serial.read(
                            self.serial.inWaiting())
                        print(data_judge)
                except:
                    while data_judge != b'\r\nOK\r\n':
                        send_data = 'AT+NSOCL=0'
                        print(send_data)
                        send_data = send_data + '\r\n'
                        self.serial.write(send_data.encode())
                        data = self.serial.read(1)
                        sleep(0.5)
                        data_judge = data + self.serial.read(
                            self.serial.inWaiting())
                        print(data_judge)

        def run_threaded(self, has_arrived):
            # !!! change car state when arrive
            if has_arrived == 'arrive':
                if self.arrived == 0:
                    self.arrived += 1
                    print("!!!!!!car_state change to c(waiting to get)")
                    try:
                        data_judge = '0'
                        # sleep(1)
                        while data_judge != b'\r\n0\r\n\r\nOK\r\n' and data_judge != b'\r\n+CME ERROR: 4\r\n':
                            send_data = 'AT+NSOCR=DGRAM,17,25535,1'
                            print(send_data)
                            send_data = send_data + '\r\n'
                            self.serial.write(send_data.encode())
                            data = self.serial.read(1)
                            sleep(0.5)
                            data_judge = data + self.serial.read(
                                self.serial.inWaiting())
                            print(data_judge)

                        judge = '0'
                        data = {'order': 'car_state'}
                        data = json.dumps(data)
                        l = len(data)
                        legal = "\r\n0," + str(len(data)) + "\r\n\r\nOK\r\n"

                        while judge != legal.encode('utf-8'):
                            send_data = "AT+NSOST=0,139.199.105.136,25535," + str(
                                len(data)) + "," + (binascii.b2a_hex(
                                    str(data).encode('utf-8'))).decode()
                            print(send_data)
                            send_data = send_data + '\r\n'
                            self.serial.write(send_data.encode())
                            data = self.serial.read(1)
                            #sleep(2)
                            sleep(1)
                            data = data + self.serial.read(
                                self.serial.inWaiting())
                            # print(data)
                            judge = data[0:14]
                    except:
                        print("send arrive error!")
            else:
                self.arrived = 0
            tmp = self.recv
            if tmp != '':
                return tmp['box_state'], tmp['car_state'], tmp['pos']
            else:
                #!!!!
                return ['open', 'open', 'open'], 'a', 'a'

        def shutdown(self):
            # close the UDP socket
            GPIO.cleanup()
            data_judge = '0'
            # sleep(1)
            print("clean udp socket")
            while data_judge != b'\r\nOK\r\n':
                send_data = 'AT+NSOCL=0'
                print(send_data)
                send_data = send_data + '\r\n'
                self.serial.write(send_data.encode())
                data = self.serial.read(1)
                sleep(0.5)
                data_judge = data + self.serial.read(self.serial.inWaiting())
                print(data_judge)

    communication = communication_class()
    #!!!
    V.add(communication,
          inputs=['arrive_signal'],
          outputs=['lock_state', 'car_state', 'pos'],
          threaded=True)

    # ********************************************* lock_ctr ********************************************
    class lock_ctr_class:
        def __init__(self):
            GPIO.setmode(GPIO.BOARD)
            GPIO.setup(36, GPIO.OUT)  # 1
            GPIO.setmode(GPIO.BOARD)
            GPIO.setup(38, GPIO.OUT)  # 2
            GPIO.setmode(GPIO.BOARD)
            GPIO.setup(40, GPIO.OUT)  # 3

        def run(self, state):
            #1
            if state[0] == 'close':
                GPIO.output(36, True)
            else:
                GPIO.output(36, False)

            #2
            if state[1] == 'close':
                GPIO.output(38, True)
            else:
                GPIO.output(38, False)

            #3
            if state[2] == 'close':
                GPIO.output(40, True)
            else:
                GPIO.output(40, False)
            # for x in range(3): # 0 1 2
            #     if state[x]=='open':
            #         GPIO.output(11+x, False)
            #     else: # close
            #         GPIO.output(11+x, True)
        def shutdown(self):
            GPIO.cleanup()

    lock_ctr = lock_ctr_class()
    V.add(lock_ctr, inputs=['lock_state'], outputs=[])

    # ********************************************* throttle filter ********************************************
    #this throttle filter will allow one tap back for esc reverse
    #th_filter = ThrottleFilter()
    #V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #
    #3 ********************************************* PilotCondition ********************************************
    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    # class PilotCondition:
    #     def run(self, mode):
    #         if mode == 'user':
    #             return False
    #         else:
    #             return True
    #
    # V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
    #!!!!
    class PilotCondition:
        def run(self, car_state):
            if car_state == 'a' or car_state == '':
                return False, 'user'
            else:
                return True, 'local_pilot'

    V.add(PilotCondition(),
          inputs=['car_state'],
          outputs=['run_pilot', 'user/mode'])

    #zd d

    #4 ********************************************* RecordTracker ********************************************
    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    #zd d

    #5 ********************************************* model ********************************************
    # important
    def load_model(kl, model_path):
        start = time.time()
        try:
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading model', model_path)

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' %
                  (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        #When we have a model, first create an appropriate Keras part
        # most time
        kl = dk.utils.get_model_by_type(model_type, cfg)
        # model_type defaults to "linear"
        model_reload_cb = None

        if '.h5' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            # most time
            load_model(kl, model_path)

            def reload_model(filename):
                print("reloading start")
                #zd
                V.sudden_control(0)
                #print('***************', V.mem.items())
                #V.mem.put(['angle', 'throttle', 'pilot/angle', 'pilot/throttle', 'user/angle', 'user/throttle'], [0, 0, 0, 0, 0 ,0])
                #print('***************', V.mem.items())
                load_model(kl, filename)
                print(
                    '***************\r\nreloading is done\r\n**************\r\n'
                )

            model_reload_cb = reload_model
        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        #V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
        #V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
        #zd
        #V.add(TriggeredCallback("./models/mypilot_2.h5", model_reload_cb), inputs=["modelfile/reload","change_model"], run_condition="ai_running")
        #V.add(TriggeredCallback("./models/mypilot_2.h5", model_reload_cb), inputs=["modelfile/reload", "change_model"])

        inputs = ['cam/image_array']
        outputs = ['pilot/angle', 'pilot/throttle', 'pilot/arrival']

        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    # ********************************************** turn_delay_trigger ************************************************
    class turn_delay_class:
        def __init__(self, pause_time):
            self.pause = False
            self.trigger = 0
            self.pause_time = pause_time
            self.now_direc = 0
            #self.direc_list = ['S'] #p is pause
            self.direc_list = ['L']
            #self.on = False
        def update(self):
            while 1:
                if self.pause == True:
                    sleep(2)
                    self.pause = False
                if self.trigger == 1:
                    #self.on = False
                    #time.sleep(3)
                    #self.on = True
                    time.sleep(self.pause_time)
                    self.trigger = 0
                    self.now_direc += 1

        def run_threaded(self, arrival_signal, mode, has_arrived, pos,
                         car_state):
            # >0 = arrive   <0 = not arrive
            if self.pause == True:
                return True, 'P', ''

            if self.trigger == 1:
                return True, self.direc_list[self.now_direc], ''

            # new deal
            if mode == 'user':
                #if car_state=='a':  # waiting to be placed
                self.now_direc = 0
                self.trigger = 0
                self.pause = False
                if pos == 'a':
                    self.direc_list = ['L']
                elif pos == 'b':
                    self.direc_list = ['S']
                print('~~~~~~~~~~~~~~~~~~~direction restart')
                return False, '', ''

            if has_arrived == 'arrive':
                return False, '', 'arrive'

            if arrival_signal > 0:
                # judge arrive
                # print("now is {}".format(self.now_direc))
                # if self.now_direc >= len(self.direc_list):
                #     print("arrive--------------------------")
                #     return False, '', 'arrive'
                if self.now_direc >= len(self.direc_list):
                    print("arrive--------------------------")
                    return False, '', 'arrive'

                self.trigger = 1
                self.pause = True
                print("**************************start turn!!!, now is {}".
                      format(self.direc_list[self.now_direc]))
                return True, self.direc_list[self.now_direc], ''
            else:
                return False, '', ''

        def shutdown(self):
            return

    turn_delay = turn_delay_class(1.65)
    #turn_delay = turn_delay_class(2)
    #V.add(turn_delay, inputs=['pilot/arrival', 'user/mode'], outputs=['turn/mode', 'turn_direction'], run_condition='run_pilot', threaded=True)
    V.add(turn_delay,
          inputs=[
              'pilot/arrival', 'user/mode', 'arrive_signal', 'pos', 'car_state'
          ],
          outputs=['turn/mode', 'turn_direction', 'arrive_signal'],
          threaded=True)

    # ********************************************** turn_module ************************************************
    class turn_module_class:
        def __init__(self):
            pass

        def run(self, direc):
            if direc == 'P':  #pause
                return 0, 0
            elif direc == 'L':  #left
                return -1, 1
            elif direc == 'R':  #right
                return 1, -1
            elif direc == 'S':  #straight
                return 0.4, 0.4
            else:  #pause
                return 0, 0

        def shutdown(self):
            return

    turn_module = turn_module_class()
    V.add(turn_module,
          inputs=['turn_direction'],
          outputs=['turn/angle', 'turn/throttle'],
          run_condition='run_pilot')

    # pause module input=[] out=[pause/mode]

    #6 ********************************************* DriveMode ********************************************
    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, turn_mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle, turn_angle, turn_throttle, arrive_signal):
            if turn_mode == True:
                return turn_angle, turn_throttle

            if mode == 'user':
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle, user_throttle

            else:  # run_pilot
                if arrive_signal == 'arrive':
                    return 0, 0
                return 0.3, 0.4
                #return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'turn/mode', 'user/angle', 'user/throttle',
              'pilot/angle', 'pilot/throttle', 'turn/angle', 'turn/throttle',
              'arrive_signal'
          ],
          outputs=['angle', 'throttle'])

    #zd d
    #to give the car a boost when starting ai mode in a race.

    #7 ********************************************* AiRunCondition ********************************************
    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording zd d

    #8 ********************************************* Motor ********************************************
    #Drive train setup
    # move power
    if cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        print('DC start!')
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        LEFT_PID_DIR1 = 11
        LEFT_PID_DIR2 = 13
        LEFT_PWM = 15
        RIGHT_PID_DIR1 = 12
        RIGHT_PID_DIR2 = 16
        RIGHT_PWM = 18

        # left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
        # right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
        left_motor = Mini_HBridge_DC_Motor_PWM(LEFT_PID_DIR1, LEFT_PID_DIR2,
                                               LEFT_PWM)
        right_motor = Mini_HBridge_DC_Motor_PWM(RIGHT_PID_DIR1, RIGHT_PID_DIR2,
                                                RIGHT_PWM)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    #9 ********************************************* tub ********************************************
    #add tub to save data
    '''
     inputs=['cam/image_array',
                'user/angle', 'user/throttle', 
                'user/mode']
    
        types=['image_array',
               'float', 'float',
               'str']
    '''

    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/arrival',
        'user/mode'
    ]

    types = ['image_array', 'float', 'float', 'float', 'str']
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    # zd d

    if type(ctr) is LocalWebController:
        print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle loop (until cfg.MAX_LOOPS iterations, if set)

    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
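
communication_class above repeats the same blocking send-then-poll exchange for every AT command it issues to the NB-IoT modem. A condensed sketch of that pattern, using a hypothetical helper name and the serial settings taken from the example:

import serial
from time import sleep

def send_at_sketch(port, command, expected, delay=0.5):
    """Write one AT command and poll until the expected raw reply arrives.
    Illustrative refactoring of the loop used repeatedly above."""
    reply = b''
    while reply != expected:
        port.write((command + '\r\n').encode())
        first = port.read(1)          # block for the first byte
        sleep(delay)                  # let the modem finish replying
        reply = first + port.read(port.inWaiting())
        print(command, reply)
    return reply

# Hypothetical usage, mirroring the handshake at the top of __init__:
# ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=3600)
# send_at_sketch(ser, 'AT', b'\r\nOK\r\n')
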
def drive(cfg, args=None, model_type=None, camera_type='single', meta=[]):
    model_type = cfg.DEFAULT_MODEL_TYPE
    speed = args.speed
    maxloop = args.maxloop

    print("maxloop=", maxloop, ",speed=", speed)

    ###########################################################
    # 1) Initialize the donkey car (set up the environment for using donkey)
    ###########################################################
    V = dk.vehicle.Vehicle()

    ###########################################################
    # 2) Configure the driving environment
    ###########################################################
    class EnvSetting(object):
        def __init__(self, mode='user', throttle=12):  # 12%
            self.mode = mode
            self.throttle = throttle * 0.01

        def run(self):
            return self.mode, self.throttle

    V.add(EnvSetting(throttle=speed),
          inputs=[],
          outputs=['user/mode', 'user/throttle'])

    ###########################################################
    # 3) Camera setup (PICAM)
    ###########################################################
    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)

    inputs = []
    threaded = True

    dpcar_opt = {'org': True, 'hsv': True, 'edges': True, 'lanes': True}
    if cfg.CAMERA_TYPE == "PICAM":
        cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                       image_d=cfg.IMAGE_DEPTH,
                       framerate=cfg.CAMERA_FRAMERATE,
                       vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP,
                       mode='deepicar', dpcar_opt=dpcar_opt)
        print(cfg.IMAGE_H, "x", cfg.IMAGE_W)

    else:
        raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

    V.add(cam,
          inputs=inputs,
          outputs=['cam/image_array', 'user/angle'],
          threaded=threaded)

    ###########################################################
    # 4) Create a class for saving driving records and add it with V.add()
    ###########################################################
    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            print("num_records=>", num_records)

            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    ###########################################################
    # 5) Configure DriveMode
    ###########################################################
    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                print("user_angle->", user_angle, ", user_throttle->",
                      user_throttle)
                return user_angle, user_throttle, True

            elif mode == 'local_angle':
                return pilot_angle if pilot_angle else 0.0, user_throttle, True

            else:
                return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0, True

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle', 'recording'])

    ###########################################################
    # 6) Configure PIGPIO_PWM
    # - the current EMPV1 actually uses DC_STEER_THROTTLE
    ###########################################################

    if cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        from donkeycar.parts.actuator import RPi_GPIO_Servo

        #steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
        steering = RPi_GPIO_Servo(cfg.HBRIDGE_PIN_RIGHT, verbose=False)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        print(">>>>DC_STEER_THROTTLE")
        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    ###########################################################
    # 7) Configure the Tub
    ###########################################################

    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    ###########################################################
    # 8) V.start (Main Loop)
    ###########################################################

    #V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=maxloop)
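
RecordTracker above prints the count every 10 records and raises a colored alert whenever the count crosses a multiple of cfg.REC_COUNT_ALERT, holding the alert for a number of loops derived from REC_COUNT_ALERT_CYC. A tiny standalone sketch of that duration arithmetic (the default values below are assumptions, used only to exercise the formula):

def alert_duration_sketch(num_records, rec_count_alert=1000, alert_cyc=15):
    # Mirrors RecordTracker: dur_alert = num_records // REC_COUNT_ALERT * REC_COUNT_ALERT_CYC
    return num_records // rec_count_alert * alert_cyc

# e.g. alert_duration_sketch(2000) == 30, i.e. the alert color is returned for 30 loops.
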
Example #17
0
        print(image)
        return 1, 1

#CAMERA

if cfg.DONKEY_GYM:
    from donkeycar.parts.dgym import DonkeyGymEnv 
    cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)
    inputs = ["steering", 'target_speed']

    logger = EpisodeLogger()
    V.add(logger, inputs=["training", "steering", "target_speed", "target_speed", "pos", "vel"])

else:
    inputs = []
    cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)

V.add(cam, inputs=inputs, outputs=["image"], threaded=True)

#V.add(Constant(), inputs=["image"], outputs=["steering", "target_speed"])

if cfg.RL_ALG_TYPE == "dreamer":
    from donkeycar.parts.rl_agent_dreamer import RL_Agent
    agent = RL_Agent(alg_type=cfg.RL_ALG_TYPE, sim=cfg.DONKEY_GYM, car_name=cfg.DONKEY_UNIQUE_NAME)
else:
    from donkeycar.parts.rl_agent_sac import RL_Agent
    agent = RL_Agent(alg_type=cfg.RL_ALG_TYPE, sim=cfg.DONKEY_GYM, car_name=cfg.DONKEY_UNIQUE_NAME, encoder_update=cfg.ENCODER_UPDATE, max_episode_steps=cfg.MAX_EPISODE_STEPS)


V.add(agent, inputs=["image", "speed"], outputs=["steering", "target_speed", "training"], threaded=False)
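
The commented-out Constant() part above hints at a way to smoke-test the actuator wiring without running the RL agent. A minimal sketch of such a stand-in (the class name comes from the comment; the fixed values and wiring are assumptions):

class ConstantSketch:
    """Illustrative stand-in that ignores the image and emits fixed commands."""
    def __init__(self, steering=0.0, target_speed=0.3):
        self.steering = steering
        self.target_speed = target_speed

    def run(self, image):
        return self.steering, self.target_speed

# Hypothetical wiring, mirroring the commented line above:
# V.add(ConstantSketch(), inputs=["image"], outputs=["steering", "target_speed"])
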
Example #18
0
def drive(cfg, model_path=None, use_joystick=False):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    #Initialisation of the CAR
    V = dk.vehicle.Vehicle()

    # Initialisation of the CAMERA
    if cfg.OS == cfg.LINUX_OS:
        if cfg.CAMERA_ID == cfg.CAMERA_MACBOOK:
            print("Camera {} can't be used.".format(cfg.CAMERA_MACBOOK))
            sys.exit()
        elif cfg.CAMERA_ID == cfg.CAMERA_MOCK:
            cam = MockCamera()
        else:
            cam = PiCamera(camera=cfg.CAMERA_ID,
                           resolution=cfg.CAMERA_RESOLUTION)
    elif cfg.OS == cfg.MAC_OS:
        if cfg.CAMERA_ID == cfg.CAMERA_MACBOOK:
            cam = MacWebcam()
        else:
            cam = VideoStream(camera=cfg.CAMERA_ID,
                              framerate=cfg.DRIVE_LOOP_HZ)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, modes, and more.
        ctr = LocalWebController()

    if (cfg.LANE_DETECTOR):
        ctr_inputs = ['cam/image_overlaid_lanes', 'ld/runtime']

        #Lane Detector
        #Detects the lane the car is in and outputs a best estimate for the 2 lines
        lane_detector = LanesDetector(cfg.CAMERA_ID, cfg.LD_PARAMETERS)
        V.add(lane_detector,
              inputs=['cam/image_array', 'algorithm/reset'],
              outputs=['cam/image_overlaid_lanes', 'ld/runtime'])
    else:
        ctr_inputs = ['cam/image_array']

    V.add(
        ctr,
        ctr_inputs,
        outputs=[
            'user/angle', 'user/throttle', 'user/mode', 'recording'
            #'algorithm/reset'
        ],
        threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    #Run the pilot if the mode is not user.
    kl = KerasLinear()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    if cfg.OS == cfg.LINUX_OS:
        steering_controller = PCA9685(cfg.STEERING_CHANNEL)
        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    elif cfg.OS == cfg.MAC_OS:
        steering_controller = MockController()
        throttle_controller = MockController()

    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           center_pulse=cfg.STEERING_CENTER_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle = PWMThrottle(
        controller=throttle_controller,
        cal_max_pulse=cfg.THROTTLE_FORWARD_CAL_PWM,
        max_pulse=cfg.THROTTLE_FORWARD_PWM,
        cal_min_pulse=cfg.THROTTLE_REVERSE_CAL_PWM,
        min_pulse=cfg.THROTTLE_REVERSE_PWM,
        zero_pulse=cfg.THROTTLE_STOPPED_PWM,
    )

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS iterations (if set)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
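The docstring above describes how parts exchange data through named channels in the Vehicle loop. As a minimal illustration, two toy parts could be wired like this; the part classes and channel names below are made up for the sketch, while the Vehicle, V.add and V.start usage mirrors the example above:

import donkeycar as dk

class Constant:
    #a non-threaded part: the loop calls run() once per iteration
    def run(self):
        return 1.0

class Doubler:
    #inputs arrive in the order declared in V.add(..., inputs=[...])
    def run(self, x):
        return 2 * x

V = dk.vehicle.Vehicle()
V.add(Constant(), outputs=['value/raw'])                      #publishes a named channel
V.add(Doubler(), inputs=['value/raw'], outputs=['value/x2'])  #consumes it by name
V.start(rate_hz=20, max_loop_count=100)                       #100 iterations at 20 Hz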
Example #19
0
File: manage.py Project: EvanWY/jaws
def drive(cfg, model_path=None, meta=[]):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    #Initialize car
    V = dk.vehicle.Vehicle()

    from donkeycar.parts.camera import PiCamera
    V.add(PiCamera(image_w=cfg.IMAGE_W,
                   image_h=cfg.IMAGE_H,
                   image_d=cfg.IMAGE_DEPTH),
          outputs=['cam/image_array'],
          threaded=True)

    V.add(LocalWebController(),
          inputs=['cam/image_array_face_box'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)
    print("You can now go to <your pi ip address>:8887 to drive your car.")

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    V.add(ImgPreProcess(cfg),
          inputs=['cam/image_array'],
          outputs=['cam/normalized/cropped'],
          run_condition='run_pilot')

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    kl = dk.utils.get_model_by_type('jaws', cfg)
    load_model(kl, model_path)

    V.add(kl,
          inputs=['cam/normalized/cropped'],
          outputs=['face_x', 'face_y', 'face_w', 'face_h', 'confidence'],
          run_condition='run_pilot')

    class FaceFollowingPilot:
        def run(self, x, y, w, h, confidence):
            return 0, 0

    V.add(FaceFollowingPilot(),
          inputs=['face_x', 'face_y', 'face_w', 'face_h', 'confidence'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    class DrawBoxForFaceDetection:
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img, x, y, w, h, confidence):
            if img is not None:
                color = np.array([0, 255, 0], dtype=np.uint8)
                color2 = np.array([255, 255, 0], dtype=np.uint8)
                img_out = np.copy(img)
                img_w, img_h = self.cfg.IMAGE_W, self.cfg.IMAGE_H
                x_px = int(img_w * x)
                y_px = int(img_h * y)
                w_px = int(img_w * w)
                h_px = int(img_h * h)
                print('x:{}, y:{}, w:{}, h:{}'.format(x_px, y_px, w_px, h_px))

                u = y_px
                d = y_px + h_px
                l = x_px
                r = x_px + w_px

                u = min(img_h - 1, max(0, u))
                d = min(img_h - 1, max(0, d))
                l = min(img_w - 1, max(0, l))
                r = min(img_w - 1, max(0, r))

                img_out[u:d, l] = color
                img_out[u:d, r] = color
                img_out[u, l:r] = color2
                img_out[d, l:r] = color
                return img_out

            return img

    V.add(DrawBoxForFaceDetection(cfg),
          inputs=[
              'cam/image_array', 'face_x', 'face_y', 'face_w', 'face_h',
              'confidence'
          ],
          outputs=['cam/image_array_face_box'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle
            elif mode == 'local_angle':
                return pilot_angle, user_throttle
            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    # drive chain
    # V.add(steering, inputs=['angle'])
    # V.add(throttle, inputs=['throttle'])
    class Dummy:
        def run(self):
            return 0

    V.add(Dummy(), outputs=['climbing'])
    V.add(JawsController(),
          inputs=['angle', 'climbing', 'throttle'],
          threaded=True)

    #add tub to save data
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(
        inputs=[
            'cam/image_array', 'face_x', 'face_y', 'face_w', 'face_h',
            'confidence'
        ],
        types=['image_array', 'float', 'float', 'float', 'float', 'float'],
        user_meta=meta)
    V.add(tub,
          inputs=[
              'cam/image_array', 'face_x', 'face_y', 'face_w', 'face_h',
              'confidence'
          ],
          outputs=["tub/num_records"],
          run_condition='recording')

    # if cfg.PUB_CAMERA_IMAGES:
    #     from donkeycar.parts.network import TCPServeValue
    #     from donkeycar.parts.image import ImgArrToJpg
    #     pub = TCPServeValue("camera")
    #     V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
    #     V.add(pub, inputs=['jpg/bin'])

    #run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS iterations (if set)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
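FaceFollowingPilot in the example above is left as a stub that always returns zero angle and throttle. As a hedged illustration of how it could steer toward the detected face, assuming (as DrawBoxForFaceDetection above implies) that x, y, w, h are box coordinates normalized to the image size, a proportional version might look like the sketch below; the gains and thresholds are invented:

#illustrative only: a proportional face-following pilot (gains/thresholds are made up)
class ProportionalFacePilot:
    def __init__(self, steer_gain=2.0, target_area=0.15, throttle_gain=1.5,
                 min_confidence=0.5):
        self.steer_gain = steer_gain
        self.target_area = target_area
        self.throttle_gain = throttle_gain
        self.min_confidence = min_confidence

    def run(self, x, y, w, h, confidence):
        if confidence is None or confidence < self.min_confidence:
            return 0.0, 0.0                                 #no reliable detection: stop
        face_center = x + w / 2.0                           #horizontal centre of the box
        angle = self.steer_gain * (face_center - 0.5)       #steer toward the face
        area = w * h
        throttle = self.throttle_gain * (self.target_area - area)   #keep a set distance
        clamp = lambda v: max(-1.0, min(1.0, v))
        return clamp(angle), clamp(throttle)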
Example #20
0
File: manage.py Project: htw135/project-tv
def drive(cfg, model_path=None, use_joystick=False, use_reader=False):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    #Initialize car
    V = dk.vehicle.Vehicle()
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    elif use_reader or cfg.USE_READER_AS_DEFAULT:
        pi = pigpio.pi()
        ctr = reader(pi,
                     12,
                     18,
                     auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)

    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    #Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']
    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS iterations (if set)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
Example #21
0
def drive(cfg,
          model_path=None,
          use_joystick=False,
          use_tx=False,
          use_pirf=False):
    '''
    Start the drive loop
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    # Initialize car
    V = dk.vehicle.Vehicle()

    if cfg.USE_WEB_CAMERA:
        cam = Webcam(resolution=cfg.CAMERA_RESOLUTION)
    else:
        cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)

    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # modify max_throttle closer to 1.0 to have more power
        # modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE,
            throttle_axis=cfg.JOYSTICK_THROTTLE_AXIS,
            steering_axis=cfg.JOYSTICK_STEERING_AXIS,
            btn_mode=cfg.JOYSTICK_DRIVING_MODE_BUTTON,
            btn_record_toggle=cfg.JOYSTICK_RECORD_TOGGLE_BUTTON,
            btn_inc_max_throttle=cfg.JOYSTICK_INCREASE_MAX_THROTTLE_BUTTON,
            btn_dec_max_throttle=cfg.JOYSTICK_DECREASE_MAX_THROTTLE_BUTTON,
            btn_inc_throttle_scale=cfg.JOYSTICK_INCREASE_THROTTLE_SCALE_BUTTON,
            btn_dec_throttle_scale=cfg.JOYSTICK_DECREASE_THROTTLE_SCALE_BUTTON,
            btn_inc_steer_scale=cfg.JOYSTICK_INCREASE_STEERING_SCALE_BUTTON,
            btn_dec_steer_scale=cfg.JOYSTICK_DECREASE_STEERING_SCALE_BUTTON,
            btn_toggle_const_throttle=cfg.
            JOYSTICK_TOGGLE_CONSTANT_THROTTLE_BUTTON,
            verbose=cfg.JOYSTICK_VERBOSE)
    elif use_tx or cfg.USE_TX_AS_DEFAULT:
        ctr = TxController(throttle_tx_min=cfg.TX_THROTTLE_MIN,
                           throttle_tx_max=cfg.TX_THROTTLE_MAX,
                           steering_tx_min=cfg.TX_STEERING_MIN,
                           steering_tx_max=cfg.TX_STEERING_MAX,
                           throttle_tx_thresh=cfg.TX_THROTTLE_TRESH,
                           verbose=cfg.TX_VERBOSE)
        fpv = FPVWebController()
        V.add(fpv, inputs=['cam/image_array'], threaded=True)
    elif use_pirf or cfg.USE_PI_RF_AS_DEFAULT:
        ctr = PiRfController(throttle_tx_min=cfg.PI_RF_THROTTLE_MIN,
                             throttle_tx_max=cfg.PI_RF_THROTTLE_MAX,
                             steering_tx_min=cfg.PI_RF_STEERING_MIN,
                             steering_tx_max=cfg.PI_RF_STEERING_MAX,
                             throttle_tx_thresh=cfg.PI_RF_THROTTLE_TRESH,
                             steering_pin=cfg.PI_RF_STEERING_PIN,
                             throttle_pin=cfg.PI_RF_THROTTLE_PIN,
                             verbose=cfg.PI_RF_VERBOSE)
        fpv = FPVWebController()
        V.add(fpv, inputs=['cam/image_array'], threaded=True)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    emergencyCtrl = EmergencyController()

    V.add(emergencyCtrl,
          inputs=['user/mode'],
          outputs=['user/mode'],
          threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    if not use_tx and not use_pirf:
        # Run the pilot if the mode is not user and not Tx.
        kl = KerasCategorical()
        #kl = KerasLinear()
        if model_path:
            kl.load(model_path)

        V.add(kl,
              inputs=['cam/image_array'],
              outputs=['pilot/angle', 'pilot/throttle'],
              run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    if cfg.USE_PWM_ACTUATOR:
        steering_controller = PCA9685(cfg.STEERING_CHANNEL)

        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle', 'user/mode'])

    # add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'pilot/angle', 'pilot/throttle'
    ]
    types = [
        'image_array', 'float', 'float', 'str', 'numpy.float32',
        'numpy.float32'
    ]

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    # run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS iterations (if set)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
Example #22
0
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    if cfg.DONKEY_GYM:
        #the simulator already uses cuda, and running inference on cuda as well
        #usually runs out of resources, so disable it for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"]="-1"

    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()
    range = Sensor(pi, cfg.RANGE_GPIOS)
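    #note: `pi` is presumably a module-level pigpio.pi() handle created elsewhere
    #in this manage.py, and the name `range` shadows the Python builtin.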
    # range.update_loop_body()
    # V.add(range, outputs=['range/cms'], threaded=True)

    print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
    if camera_type == "stereo":

        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam

            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)

        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam

            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
        else:
            raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair

        V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
            outputs=['cam/image_array'])
    elif cfg.CAMERA_TYPE == "D435":
        from donkeycar.parts.realsense435i import RealSense435i
        cam = RealSense435i(
            enable_rgb=cfg.REALSENSE_D435_RGB,
            enable_depth=cfg.REALSENSE_D435_DEPTH,
            enable_imu=cfg.REALSENSE_D435_IMU,
            device_id=cfg.REALSENSE_D435_ID)
        V.add(cam, inputs=[],
              outputs=['cam/image_array', 'cam/depth_array',
                       'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                       'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
              threaded=True)

    else:
        inputs = []
        threaded = True
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, delay=cfg.SIM_ARTIFICIAL_LATENCY)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            print("NBNBNBNBNBNBNNBNBNBNBNBNB",666666)
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
            # pos = cam.update()
            # print(pos)
            
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "IMAGE_LIST":
            from donkeycar.parts.camera import ImageListCamera
            cam = ImageListCamera(path_mask=cfg.PATH_MASK)
        else:
            raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "MM1":
            from donkeycar.parts.robohat import RoboHATController            
            ctr = RoboHATController(cfg)
        elif "custom" == cfg.CONTROLLER_TYPE:
            #
            # custom controller created with `donkey createjs` command
            #
            from my_joystick import MyJoystickController
            ctr = MyJoystickController(
                throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
                throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
            ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
        else:
            from donkeycar.parts.controller import get_js_controller

            ctr = get_js_controller(cfg)

            if cfg.USE_NETWORKED_JS:
                from donkeycar.parts.controller import JoyStickSub
                netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
                V.add(netwkJs, threaded=True)
                ctr.js = netwkJs
        
        V.add(ctr, 
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, modes, and more.
        ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
        
        V.add(ctr,
          inputs=['cam/image_array', 'tub/num_records'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1 #solid on

            if recording:
                return -1 #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
              outputs=['led/blink_rate'])

        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #the circle button is free in this mode, so hijack it to force a record count indication
        def show_record_count_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1
        ctr.set_button_down_trigger('circle', show_record_count_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import IMU
        imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
        V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
            inputs=['cam/image_array'],
            outputs=[inf_input],
            run_condition='run_pilot')

    # Use the FPV preview, which will show the cropped image output, or the full frame.
    if cfg.USE_FPV:
        V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert(cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs=[inf_input,
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs=[inf_input]

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)) )

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            #when we have a full model file (.h5, .uff, .tflite or .pkl),
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.weights file with just the weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        #this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])

        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
        V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
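        #Note on the reload chain above: FileWatcher raises 'modelfile/dirty'
        #when the model file's timestamp changes, DelayedTrigger(100) waits a
        #number of loop iterations before raising 'modelfile/reload', and
        #TriggeredCallback then invokes model_reload_cb with model_path. All
        #three only run while 'ai_running' is truthy, so reloading never
        #interrupts user driving.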

        outputs=['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs,
            outputs=outputs,
            run_condition='run_pilot')
    
    if cfg.STOP_SIGN_DETECTOR:
        from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
        V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX), inputs=['cam/image_array', 'pilot/throttle'], outputs=['pilot/throttle', 'cam/image_array'])
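        #note: this part reads and rewrites 'pilot/throttle', so it can override
        #the throttle (e.g. stop the car) while a stop sign is detected; it also
        #writes its (optionally annotated) frame back onto 'cam/image_array'.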

    #Choose what inputs should change the car.
    imgW = 160
    imgH = 120
    def JudgeLR(p):
        #dead-zone width around the image centre
        SILENT_LR_DISTANCE = imgW/12
        #image centre point
        center = [imgW/2, imgH/2]
        #if the target is left of the image centre, turn left
        p1 = (p[0]+p[2])/2
        p2 = center[0]-SILENT_LR_DISTANCE
        p3 = (p[0]+p[2])/2
        p4 = center[0]+SILENT_LR_DISTANCE
        if p1 < p2:
            print(-1)
            return -1
        #if the target is right of the image centre, turn right
        elif p3 > p4:
            print(1)
            return 1
        #otherwise the target sits inside the dead zone: go straight
        return 0
    def JudgeFB(p):
        #dead-zone height around the image centre (not used below)
        SILENT_FB_DISTANCE = imgH/12
        #image centre point (not used below)
        center = [imgW/2, imgH/2]
        #if the bottom of the target box is past 1/3 of the image height, drive forward
        p5 = p[3]/imgH
        if p5 > 1/3:
            return 0.75
        else:
            return 0
    class DriveMode:
        def run(self, mode,
                    user_angle, user_throttle,
                    pilot_angle, pilot_throttle):
            global force_stop
            global music_ing
            #get the ultrasonic range measurement (distance)
            distance = range.distance_get()
            #get the latest object-detection result
            with open("/home/pi/projects/donkeycar/parts/pos_data.txt", "rb") as f:
                try:
                    outputxmin = pickle.load(f)
                    outputymin = pickle.load(f)
                    outputxmax = pickle.load(f)
                    outputymax = pickle.load(f)
                    # music_controller = pickle.load(f)
                    music_on = pickle.load(f)
                    print(music_on)
                except EOFError:
                    return None
            p = [outputxmin, outputymin, outputxmax, outputymax]
            print("music_ing", music_ing)
            print("music_on", music_on)
            
            # if music_controller == 1:
                # os.system('mpg123 music.mp3')
            # elif music_controller == 0:
                # os.system('q')
            imgW = 160
            imgH = 120
            #dead-zone width around the image centre
            SILENT_LR_DISTANCE = imgW/12
            #image centre point
            center = [imgW/2, imgH/2]
            p1=(p[0]+p[2])/2
            p2=center[0]-SILENT_LR_DISTANCE
            p3=(p[0]+p[2])/2
            p4=center[0]+SILENT_LR_DISTANCE
            d1=imgH-p[3]
            d2=p1-center[0]
            angle = -0.3
            if p[0] < 0:  #no pedestrian detected
                user_throttle = 0.63
                user_angle = angle
                print('no pedestrian detected!!')
                print("angle", angle)
                if distance < 40:
                    print('distance:',distance)
                    user_throttle = 0.0
                    user_angle = -0.3
            else:  #pedestrian detected
                if distance >= 40:
                    #set throttle from the ultrasonic distance
                    throttle = (0.15/110)*distance+0.58
                    if throttle < 0.7:
                        user_throttle = throttle
                    else:
                        user_throttle = 0.7
                    #set throttle from the image (disabled)
                    # user_throttle = 0.65

                    # user_angle = -0.3
                    # #if the target is left of the image centre, turn left
                    # if p1<p2:
                        # print("turn left", -0.5)
                        # user_angle = -0.5
                    # #if the target is right of the image centre, turn right
                    # elif p3>p4:
                        # print("turn right", 0.2)
                        # user_angle = 0.2
                    # # elif d1>imgH/3:
                        # # print('speed up')
                        # # user_throttle = 0.65
                    # # elif d1<imgH/3 and d1>imgH/4:
                        # # print('slow down')
                        # # user_throttle = 0.65
                    user_angle = (0.7/70)*d2-0.3
                    if user_angle < -0.3:
                        angle = -0.3+(abs(user_angle+0.3))/3
                    elif user_angle > -0.3:
                        angle = -0.3-(abs(user_angle+0.3))/3
                elif distance < 40:
                    print('distance:',distance)
                    user_throttle = 0.0
                    user_angle = -0.3
                    angle = user_angle
            if music_on == 1 and music_ing==0:
                pygame.mixer.music.load("music1.mp3")
                pygame.mixer.music.play(-1)
                music_ing = 1
                user_throttle = 0
            elif music_on == 1 and music_ing == 1:
                user_throttle = 0
            elif music_on == 0 and music_ing == 1:
                pygame.mixer.music.stop()
                music_ing = 0
            if mode == 'user':
                print('throttle', user_throttle)
                print('angle', user_angle)
                print(p)
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle if pilot_angle else 0.0, user_throttle

            # else:
                # if distance < 30:
                    # print('2:',distance)
                    # pilot_throttle = 0.0
                # return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0
            else:
                if p[0] < 0:  #no pedestrian detected
                    pilot_throttle = 0.0
                    pilot_angle = 0.0
                    print('no pedestrian detected', p)
                    return pilot_angle, pilot_throttle
                else:
                    print('pedestrian detected!!!')
                    if distance >= 30:
                        return JudgeLR(p),JudgeFB(p)
                    elif distance < 30:
                        print('2:',distance)
                        pilot_throttle = 0.0
                    return pilot_angle if pilot_angle else 0.0, pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])


    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)

    V.add(aiLauncher,
        inputs=['user/mode', 'throttle'],
        outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)


    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True in AI mode, otherwise respect the user-mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
        pass
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM,
                                        right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                                        max_pulse=cfg.THROTTLE_FORWARD_PWM,
                                        zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                                        min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)


    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])


    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
                inputs=['throttle', 'angle'],
                outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #for ServoBlaster, STEERING_CHANNEL is really a GPIO pin number
        #PWM pulse values should be in the range of 100 to 200
        assert(cfg.STEERING_LEFT_PWM <= 200)
        assert(cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM,
                                        right_pulse=cfg.STEERING_RIGHT_PWM)


        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(motor, inputs=["throttle"])
        
    elif cfg.DRIVE_TRAIN_TYPE == "MM1":
        from donkeycar.parts.robohat import RoboHATDriver
        V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
    
    elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
        from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
        steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM, 
                                        right_pulse=cfg.STEERING_RIGHT_PWM)
        
        throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
        throttle = PWMThrottle(controller=throttle_controller,
                                            max_pulse=cfg.THROTTLE_FORWARD_PWM,
                                            zero_pulse=cfg.THROTTLE_STOPPED_PWM, 
                                            min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    # OLED setup
    if cfg.USE_SSD1306_128_32:
        from donkeycar.parts.oled import OLEDPart
        auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
        oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
        V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)

    #add tub to save data

    inputs=['cam/image_array',
            'user/angle', 'user/throttle',
            'range/cms', #added range sensor data
            'user/mode']

    types=['image_array',
           'float', 'float',
           'float',      #type of the added range sensor data
           'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
        types += ['int', 'str', 'vector']

    if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
        inputs += ['cam/depth_array']
        types += ['gray16_array']

    if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
        inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']

        types +=['float', 'float', 'float',
           'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']
        

    # th = TubHandler(path=cfg.DATA_PATH)
    # tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    # V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        if cfg.DONKEY_GYM:
            print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
        else:
            print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        # ctr.set_tub(tub)

        if cfg.BUTTON_PRESS_NEW_TUB:

            def new_tub_dir():
                V.parts.pop()
                # tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
                # V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
                # ctr.set_tub(tub)

            ctr.set_button_down_trigger('cross', new_tub_dir)
        ctr.print_controls()

    # run the vehicle loop at DRIVE_LOOP_HZ until MAX_LOOPS iterations (if set)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
            max_loop_count=cfg.MAX_LOOPS)
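The hand-tuned mappings inside DriveMode above (ultrasonic distance to throttle, horizontal box offset to steering) read more easily when factored into small helpers; the sketch below only restates the constants already used in the example:

#sketch: the distance->throttle and offset->steering mappings used in DriveMode
#above, factored into helpers with explicit clamping; constants copied from the example
def throttle_from_distance(distance, stop_below=40, slope=0.15/110, base=0.58, cap=0.7):
    #below `stop_below` the example stops the car; above it, throttle grows
    #linearly with distance and is capped at `cap`
    if distance < stop_below:
        return 0.0
    return min(cap, slope*distance + base)

def steering_from_offset(d2, slope=0.7/70, bias=-0.3):
    #d2 is the horizontal offset (in pixels) of the detected box centre from
    #the image centre; the example maps it linearly around a -0.3 bias
    return slope*d2 + bias

#e.g. throttle_from_distance(110) caps at 0.7 and steering_from_offset(0) gives -0.3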
Example #23
0
File: manage.py Project: muggat0n/mydonkey
def drive(cfg,
          model_path=None,
          use_joystick=False,
          model_type=None,
          camera_type='single',
          meta=[]):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    if cfg.DONKEY_GYM:
        #the simulator already uses cuda, and running inference on cuda as well
        #usually runs out of resources, so disable it for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    if camera_type == "stereo":

        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam

            camA = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W,
                          image_h=cfg.IMAGE_H,
                          image_d=cfg.IMAGE_DEPTH,
                          iCam=1)

        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam

            camA = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair

        V.add(StereoPair(),
              inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])

    else:
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)

        inputs = []
        threaded = True
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH,
                               env_name=cfg.DONKEY_GYM_ENV_NAME)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W,
                           image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W,
                        image_h=cfg.IMAGE_H,
                        image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W,
                            image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE,
                            gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W,
                            image_h=cfg.IMAGE_H,
                            image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W,
                             image_h=cfg.IMAGE_H,
                             image_d=cfg.IMAGE_DEPTH)
        else:
            raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam,
              inputs=inputs,
              outputs=['cam/image_array'],
              threaded=threaded)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        from donkeycar.parts.controller import get_js_controller

        ctr = get_js_controller(cfg)

        if cfg.USE_NETWORKED_JS:
            from donkeycar.parts.controller import JoyStickSub
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs

    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state,
                model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R,
                            self.cfg.MODEL_RELOADED_LED_G,
                            self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1  #solid on

            if recording:
                return -1  #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B,
                      cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg),
              inputs=[
                  'user/mode', 'recording', "records/alert", 'behavior/state',
                  'modelfile/modified', "pilot/loc"
              ],
              outputs=['led/blink_rate'])

        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part,
          inputs=["tub/num_records"],
          outputs=['records/alert'])

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_count_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1

        ctr.set_button_down_trigger('circle', show_record_count_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import Mpu6050
        imu = Mpu6050()
        V.add(imu,
              outputs=[
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
                  'imu/gyr_y', 'imu/gyr_z'
              ],
              threaded=True)

    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
              inputs=['cam/image_array'],
              outputs=[inf_input],
              run_condition='run_pilot')

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh,
              outputs=[
                  'behavior/state', 'behavior/label',
                  "behavior/one_hot_state_array"
              ])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
    #IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs = [
            inf_input, 'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x',
            'imu/gyr_y', 'imu/gyr_z'
        ]
    else:
        inputs = [inf_input]

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' %
                  (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            #when we have a single-file model (.h5, .uff, .tflite or .pkl),
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.weights file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        #this part will signal visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True),
              outputs=['modelfile/modified'])

        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        V.add(FileWatcher(model_path),
              outputs=['modelfile/dirty'],
              run_condition="ai_running")
        V.add(DelayedTrigger(100),
              inputs=['modelfile/dirty'],
              outputs=['modelfile/reload'],
              run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb),
              inputs=["modelfile/reload"],
              run_condition="ai_running")

        outputs = ['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle, user_throttle

            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE,
                          cfg.AI_LAUNCH_KEEP_ENABLED)

    V.add(aiLauncher, inputs=['user/mode', 'throttle'], outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON,
                                    aiLauncher.enable_ai_launch)

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when in AI mode, otherwise respect the user-mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(),
              inputs=['user/mode', 'recording'],
              outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM:
        pass

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                      cfg.PCA9685_I2C_ADDR,
                                      busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT,
                                             cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                             cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD,
                                               cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD,
                                                cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SKID_STEER":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, SBC_PiMotor

        right_motor = SBC_PiMotor(1)
        left_motor = SBC_PiMotor(2)

        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
              inputs=['throttle', 'angle'],
              outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL)  #this value is really a GPIO pin number
        #PWM pulse values should be in the range of 100 to 200
        assert (cfg.STEERING_LEFT_PWM <= 200)
        assert (cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD,
                                          cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(motor, inputs=["throttle"])

    #add tub to save data

    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']

    types = ['image_array', 'float', 'float', 'str']

    if cfg.TRAIN_BEHAVIORS:
        inputs += [
            'behavior/state', 'behavior/label', "behavior/one_hot_state_array"
        ]
        types += ['int', 'str', 'vector']

    if cfg.HAVE_IMU:
        inputs += [
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z', 'imu/gyr_x', 'imu/gyr_y',
            'imu/gyr_z'
        ]

        types += ['float', 'float', 'float', 'float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    if cfg.PUB_CAMERA_IMAGES:
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        print(
            "You can now go to <your pis hostname.local>:8887 to drive your car."
        )
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        #tell the controller about the tub
        ctr.set_tub(tub)

        if cfg.BUTTON_PRESS_NEW_TUB:

            def new_tub_dir():
                V.parts.pop()
                tub = th.new_tub_writer(inputs=inputs,
                                        types=types,
                                        user_meta=meta)
                V.add(tub,
                      inputs=inputs,
                      outputs=["tub/num_records"],
                      run_condition='recording')
                ctr.set_tub(tub)

            ctr.set_button_down_trigger('cross', new_tub_dir)
        ctr.print_controls()

    #run the vehicle loop
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
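The example above wires every part into the vehicle through named memory channels and gates the autopilot with a boolean run_condition. Below is a minimal, self-contained sketch of that part / run_condition pattern; the ToyVehicle class is a simplified stand-in for illustration only, not donkeycar's actual Vehicle implementation.

class ToyVehicle:
    """Simplified stand-in for donkeycar's Vehicle loop (illustration only)."""
    def __init__(self):
        self.mem = {}       # named channels shared between parts
        self.parts = []

    def add(self, part, inputs=None, outputs=None, run_condition=None):
        self.parts.append((part, inputs or [], outputs or [], run_condition))

    def step(self):
        for part, inputs, outputs, cond in self.parts:
            # skip the part when its run_condition channel is falsy
            if cond is not None and not self.mem.get(cond):
                continue
            result = part.run(*[self.mem.get(k) for k in inputs])
            if len(outputs) == 1:
                result = (result,)
            for key, value in zip(outputs, result or ()):
                self.mem[key] = value


class PilotCondition:
    def run(self, mode):
        return mode != 'user'


class ConstantPilot:
    def run(self):
        return 0.0, 0.3      # angle, throttle


v = ToyVehicle()
v.mem['user/mode'] = 'local'
v.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
v.add(ConstantPilot(), outputs=['pilot/angle', 'pilot/throttle'],
      run_condition='run_pilot')
v.step()
print(v.mem['pilot/angle'], v.mem['pilot/throttle'])   # 0.0 0.3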
Example #24
File: manage.py Project: OrionWolfe/dkcar
def drive(cfg, model_path=None, use_joystick=False, use_chaos=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    """

    V = dk.vehicle.Vehicle()

    def get_timestamp():
        return time.time()

    clock = Lambda(get_timestamp)
    V.add(clock, outputs=['timestamp'])

    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        ctr = JoystickController(max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
                                 steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                                 auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(use_chaos=use_chaos)

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    curr_angle = 0.0
    def smooth_angle(in_angle):
        nonlocal curr_angle
        delta = in_angle - curr_angle
        if abs(delta) < 0.02:
            curr_angle = in_angle
        else:
            curr_angle = curr_angle + delta * 0.15

        print('smoothed angle:', curr_angle)
        return curr_angle

    angleSmoother = Lambda(smooth_angle)
    V.add(angleSmoother, inputs=['user/angle'], outputs=['user/angle'])


    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'],
                                outputs=['run_pilot'])

    # Add velocity detector
    vd = Lambda(make_velocity_detector())
    V.add(vd, inputs=['cam/image_array'], outputs=['user/velocity'])

    """"
    def print_velocity(v):
        print('velocity:', v)
    pv = Lambda(print_velocity)
    V.add(pv, inputs=['user/velocity'])
    """

    # Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl, inputs=['cam/image_array'],
              outputs=['pilot/angle', 'pilot/velocity'],
              run_condition='run_pilot')

    def calc_throttle(inferred_v, actual_v):
        throttle = 0.38
        """
        if inferred_v > actual_v:
            throttle = min((inferred_v - actual_v) * 1.0, 0.5)
        print('V: inferred={} actual={} => throttle={}'.format(inferred_v, actual_v, throttle));
        """
        if (inferred_v > 5):
            throttle = 0.48
        elif (inferred_v > 4):
            throttle = 0.44
        elif (inferred_v > 3):
            throttle = 0.42
        elif (inferred_v > 2):
            throttle = 0.41
        elif (inferred_v > 1):
            throttle = 0.39
        return throttle

    ct = Lambda(calc_throttle)
    V.add(ct, inputs=['pilot/velocity', 'user/velocity'],
              outputs=['pilot/throttle'],
              run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode,
                   user_angle, user_throttle,
                   pilot_angle, pilot_throttle):
        if mode == 'user':
            # manual steer, manual throttle
            return user_angle, user_throttle

        elif mode == 'local_angle':
            # auto steer, manual throttle
            return pilot_angle, user_throttle

        else:
            # auto steer, auto throttle
            print('PILOT: angle={} throttle={}'.format(pilot_angle, pilot_throttle))
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])

    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # add tub to save data
    inputs = ['cam/image_array', 'user/angle', 'user/velocity', 'user/throttle', 'user/mode', 'timestamp']
    types = ['image_array', 'float', 'float', 'float', 'str', 'float']

    #multiple tubs
    #th = TubHandler(path=cfg.DATA_PATH)
    #tub = th.new_tub_writer(inputs=inputs, types=types)

    # single tub
    tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
            max_loop_count=cfg.MAX_LOOPS)
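The smooth_angle part above is a simple exponential (low-pass) step: move a fraction of the remaining error each loop and snap to the target once the error is tiny. A standalone version of the same update, with the 0.15 factor and 0.02 threshold copied from the example:

def smooth(current, target, factor=0.15, snap=0.02):
    delta = target - current
    if abs(delta) < snap:
        return target            # close enough: snap to the requested angle
    return current + delta * factor

angle = 0.0
for _ in range(10):
    angle = smooth(angle, 1.0)
print(round(angle, 3))           # approaches 1.0 gradually: ~0.803 after 10 steps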
Example #25
File: manage.py Project: swapdha/kong
def drive(cfg, model_path=None, use_joystick=False, use_chaos=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    """

    V = dk.vehicle.Vehicle()

    clock = Timestamp()
    V.add(clock, outputs=['timestamp'])

    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    ctr = SerialController()

    V.add(ctr,
          inputs=[],
          outputs=['user/angle', 'user/throttle', 'user/mode'],
          threaded=True)

    V.add(LocalWebController(use_chaos=use_chaos),
          inputs=['cam/image_array'],
          outputs=['a', 'b', 'c', 'recording'],
          threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    # Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'timestamp'
    ]
    types = ['image_array', 'float', 'float', 'str', 'str']

    #multiple tubs
    #th = TubHandler(path=cfg.DATA_PATH)
    #tub = th.new_tub_writer(inputs=inputs, types=types)

    # single tub
    tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
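The drive_mode selector above is a pure function of its inputs, so it is easy to sanity-check on its own. A standalone copy with a few asserts (no donkeycar imports needed):

def drive_mode(mode, user_angle, user_throttle, pilot_angle, pilot_throttle):
    if mode == 'user':
        return user_angle, user_throttle
    elif mode == 'local_angle':
        return pilot_angle, user_throttle
    else:
        return pilot_angle, pilot_throttle

assert drive_mode('user', 0.1, 0.2, 0.9, 0.8) == (0.1, 0.2)
assert drive_mode('local_angle', 0.1, 0.2, 0.9, 0.8) == (0.9, 0.2)
assert drive_mode('local', 0.1, 0.2, 0.9, 0.8) == (0.9, 0.8)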
Example #26
def drive(cfg, model_path=None, model_type=None):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    cam = PiCamera(image_w=cfg.IMAGE_W,
                   image_h=cfg.IMAGE_H,
                   image_d=cfg.IMAGE_DEPTH)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    V.add(LocalWebController(),
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    inputs = ['cam/image_array']

    def load_model(kl, model_path):
        start = time.time()
        try:
            print('loading model', model_path)
            kl.load(model_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading model', model_path)

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' %
                  (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        if '.h5' in model_path:
            #when we have a .h5 extension
            #load everything from the model file
            load_model(kl, model_path)

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.weights file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

        outputs = ['pilot/angle', 'pilot/throttle']

        V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode, user_angle, user_throttle, pilot_angle,
                pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle, user_throttle

            else:
                return pilot_angle, pilot_throttle * cfg.AI_THROTTLE_MULT

    V.add(DriveMode(),
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    #Drive train setup

    from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

    steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data

    inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode']

    types = ['image_array', 'float', 'float', 'str']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub,
          inputs=inputs,
          outputs=["tub/num_records"],
          run_condition='recording')

    print("You can now go to <your pi ip address>:8887 to drive your car.")

    #run the vehicle loop
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
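PWMSteering above maps a normalized angle in [-1, 1] onto the pulse range defined by STEERING_LEFT_PWM and STEERING_RIGHT_PWM. A sketch of that linear mapping with typical example pulse values; donkeycar's own implementation may differ in detail:

def angle_to_pulse(angle, left_pulse=460, right_pulse=290):
    # clamp to the expected range, then interpolate: -1 -> left, +1 -> right
    angle = max(-1.0, min(1.0, angle))
    return int(left_pulse + (angle + 1.0) * (right_pulse - left_pulse) / 2.0)

print(angle_to_pulse(-1.0))   # 460 (full left)
print(angle_to_pulse(0.0))    # 375 (center)
print(angle_to_pulse(1.0))    # 290 (full right)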
Example #27
File: donkey2.py Project: chankh/donkey
def drive(cfg, model_path=None, use_joystick=False, use_chaos=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    """

    V = dk.vehicle.Vehicle()

    clock = Timestamp()
    V.add(clock, outputs=['timestamp'])

    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        ctr = LocalWebController(use_chaos=use_chaos)

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    # Run the pilot if the mode is not user.
    kl = KerasCategorical()
    if model_path:
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    # Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'timestamp'
    ]
    types = ['image_array', 'float', 'float', 'str', 'str']

    #multiple tubs
    #th = TubHandler(path=cfg.DATA_PATH)
    #tub = th.new_tub_writer(inputs=inputs, types=types)

    # single tub
    tub = TubWriter(path=cfg.TUB_PATH, inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    #setup AWS IoT
    if cfg.IOT_ENABLED:
        aws = AWSHandler(vehicle_id=cfg.VEHICLE_ID,
                         endpoint=cfg.AWS_ENDPOINT,
                         ca=cfg.CA_PATH,
                         private_key=cfg.PRIVATE_KEY_PATH,
                         certificate=cfg.CERTIFICATE_PATH)
        iot = aws.new_iot_publisher(intputs=inputs, types=types)
        V.add(iot, inputs=inputs, run_condition='recording')

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
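The TubWriter and the IoT publisher above both rely on the `inputs` and `types` lists staying index-aligned. A tiny guard like the following can catch mismatches early; it is a convention check written for this example, not donkeycar API:

inputs = ['cam/image_array', 'user/angle', 'user/throttle', 'user/mode', 'timestamp']
types  = ['image_array',     'float',      'float',         'str',       'str']

assert len(inputs) == len(types), "each recorded key needs exactly one type"
for key, typ in zip(inputs, types):
    print(f"{key:20s} -> {typ}")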
Example #28
def drive(cfg, model_path=None, use_joystick=False, model_type=None, camera_type='single', meta=[]):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    """
    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    # initialize car
    V = dk.vehicle.Vehicle()

    # setup camera
    if camera_type == "stereo":

        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam

            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)

        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam

            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam=1)
        else:
            raise (Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair

        V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
              outputs=['cam/image_array'])

    else:
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv

        inputs = []
        threaded = True
        print("cfg.CAMERA_TYPE", cfg.CAMERA_TYPE)
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, env_name=cfg.DONKEY_GYM_ENV_NAME)
            threaded = True
            inputs = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH,
                            framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        else:
            raise (Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        V.add(cam, inputs=inputs, outputs=['cam/image_array'], threaded=threaded)

    # setup joystick/web controller
    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        # modify max_throttle closer to 1.0 to have more power
        # modify steering_scale lower than 1.0 to have less responsive steering
        logging.warning('Using joystick controller')

        ctr = get_js_controller(cfg)

        if cfg.USE_NETWORKED_JS:
            logging.warning('Using networked joystick controller')
            netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
            V.add(netwkJs, threaded=True)
            ctr.js = netwkJs

    else:
        # This web controller will create a web server that is capable
        # of managing steering, throttle, and modes, and more.
        logging.warning('Using web controller')
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])


    # autopilot
    class ImgPreProcess():
        '''
        preprocess camera image for inference.
        normalize and crop if needed.
        '''

        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, img_arr):
            return normalize_and_crop(img_arr, self.cfg)

    if "coral" in model_type:
        inf_input = 'cam/image_array'
    else:
        inf_input = 'cam/normalized/cropped'
        V.add(ImgPreProcess(cfg),
              inputs=['cam/image_array'],
              outputs=[inf_input],
              run_condition='run_pilot')

    # Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = [inf_input, "behavior/one_hot_state_array"]
        # IMU
    elif model_type == "imu":
        assert (cfg.HAVE_IMU)
        # Run the pilot if the mode is not user.
        inputs = [inf_input,
                  'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                  'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs = [inf_input]

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)))

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' % (str(time.time() - start)))
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        # When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or 'tflite' in model_path or '.pkl' in model_path:
            # when we have a single-file model (.h5, .uff, .tflite or .pkl),
            # load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            # when we have a .json extension
            # load the model from there and look for a matching
            # .weights file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        outputs = ['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs,
              outputs=outputs,
              run_condition='run_pilot')

    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''

        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    # add LIDAR stuff
    # NOTE: using part from donkeyacc
    lidar_processor = LidarProcessor(non_block=True, debug=False)  # use non-block mode for more fps
    V.add(lidar_processor, outputs=['lidar/distance'], threaded=False)

    # add cruise controller
    # NOTE: using part from donkeyacc
    cruise_controller = CruiseController(kp=1, kd=0.8, default_distance=1.0,
                                         throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                                         max_throttle=0.75,
                                         use_timer=True,
                                         debug=False)
    V.add(cruise_controller, inputs=['lidar/distance', 'user/throttle'], outputs=['acc/throttle'], threaded=False)

    # TODO: YOLO detection
    yolo_processor = YoloProcessor(non_block=True, warning=True, debug=True)    # TODO: disable debug mode
    V.add(yolo_processor, inputs=['cam/image_array'], outputs=['yolo/detection'], threaded=False)

    class DriveMode:
        """
        Change different drive modes: manual, adaptive CC, autopilot, etc...
        """
        def run(self, mode,
                user_angle, user_throttle,
                pilot_angle, pilot_throttle):
            # NOTE: pilot_throttle is now handled by ACC
            if mode == 'user':
                # using user angle and throttle
                return user_angle, user_throttle

            elif mode == 'local_angle':
                # using user angle and ACC throttle
                return user_angle, pilot_throttle

            else:
                # using autopilot angle and ACC throttle
                return pilot_angle, pilot_throttle

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'acc/throttle'],
          outputs=['angle', 'throttle'])

    # setup drive train
    steering_controller = PCA9685(
        cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(
        cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    # run the vehicle with drive loop frequency and max loops defined in config.py
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
            max_loop_count=cfg.MAX_LOOPS)
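The CruiseController wired in above acts as a distance-keeping PD controller on the lidar gap. A stripped-down sketch of the same idea, with kp=1, kd=0.8, target distance 1.0 and max throttle 0.75 copied from the call above; this is an illustration, not the donkeyacc implementation:

class SimplePD:
    """Toy distance-keeping controller: throttle from gap error (illustration only)."""
    def __init__(self, kp=1.0, kd=0.8, target=1.0, max_throttle=0.75):
        self.kp, self.kd, self.target = kp, kd, target
        self.max_throttle = max_throttle
        self.prev_error = 0.0

    def run(self, distance):
        error = distance - self.target       # positive when the gap is too large
        d_error = error - self.prev_error    # discrete derivative per loop
        self.prev_error = error
        throttle = self.kp * error + self.kd * d_error
        return max(0.0, min(self.max_throttle, throttle))


pd = SimplePD()
for gap in (2.0, 1.5, 1.0, 0.8):
    print(gap, round(pd.run(gap), 3))        # throttle shrinks to 0 as the gap closes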
Example #29
File: complete.py Project: MPihlap/Donkeh
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
          camera_type='single', meta=[]):
    """
    Construct a working robotic vehicle from many parts. Each part runs as a
    job in the Vehicle loop, calling either its run or run_threaded method
    depending on the constructor flag `threaded`. All parts are updated one
    after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
    part finishes processing in a timely manner. Parts may have named outputs
    and inputs. The framework handles passing named outputs to parts
    requesting the same named input.
    """
    logger.info(f'PID: {os.getpid()}')
    if cfg.DONKEY_GYM:
        #the simulator will use cuda and then we usually run out of resources
        #if we also try to use cuda. so disable for donkey_gym.
        os.environ["CUDA_VISIBLE_DEVICES"]="-1"

    if model_type is None:
        if cfg.TRAIN_LOCALIZER:
            model_type = "localizer"
        elif cfg.TRAIN_BEHAVIORS:
            model_type = "behavior"
        else:
            model_type = cfg.DEFAULT_MODEL_TYPE

    #Initialize car
    V = dk.vehicle.Vehicle()

    #Initialize logging before anything else to allow console logging
    if cfg.HAVE_CONSOLE_LOGGING:
        logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
        ch = logging.StreamHandler()
        ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
        logger.addHandler(ch)

    if cfg.HAVE_MQTT_TELEMETRY:
        from donkeycar.parts.telemetry import MqttTelemetry
        tel = MqttTelemetry(cfg)

    if cfg.HAVE_ODOM:
        if cfg.ENCODER_TYPE == "GPIO":
            from donkeycar.parts.encoder import RotaryEncoder
            enc = RotaryEncoder(mm_per_tick=0.306096, pin = cfg.ODOM_PIN, debug = cfg.ODOM_DEBUG)
            V.add(enc, inputs=['throttle'], outputs=['enc/speed'], threaded=True)
        elif cfg.ENCODER_TYPE == "arduino":
            from donkeycar.parts.encoder import ArduinoEncoder
            enc = ArduinoEncoder()
            V.add(enc, outputs=['enc/speed'], threaded=True)
        else:
            print("No supported encoder found")

    logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
    if camera_type == "stereo":

        if cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam

            camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
            camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)

        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam

            camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
            camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
        else:
            raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))

        V.add(camA, outputs=['cam/image_array_a'], threaded=True)
        V.add(camB, outputs=['cam/image_array_b'], threaded=True)

        from donkeycar.parts.image import StereoPair

        V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
            outputs=['cam/image_array'])
    elif cfg.CAMERA_TYPE == "D435":
        from donkeycar.parts.realsense435i import RealSense435i
        cam = RealSense435i(
            enable_rgb=cfg.REALSENSE_D435_RGB,
            enable_depth=cfg.REALSENSE_D435_DEPTH,
            enable_imu=cfg.REALSENSE_D435_IMU,
            device_id=cfg.REALSENSE_D435_ID)
        V.add(cam, inputs=[],
              outputs=['cam/image_array', 'cam/depth_array',
                       'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
                       'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
              threaded=True)

    else:
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv

        inputs = []
        outputs = ['cam/image_array']
        threaded = True
        if cfg.DONKEY_GYM:
            from donkeycar.parts.dgym import DonkeyGymEnv 
            #rbx
            cam = DonkeyGymEnv(cfg.DONKEY_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.DONKEY_GYM_ENV_NAME, conf=cfg.GYM_CONF, record_location=cfg.SIM_RECORD_LOCATION, record_gyroaccel=cfg.SIM_RECORD_GYROACCEL, record_velocity=cfg.SIM_RECORD_VELOCITY, delay=cfg.SIM_ARTIFICIAL_LATENCY)
            threaded = True
            inputs  = ['angle', 'throttle']
        elif cfg.CAMERA_TYPE == "PICAM":
            from donkeycar.parts.camera import PiCamera
            cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
        elif cfg.CAMERA_TYPE == "WEBCAM":
            from donkeycar.parts.camera import Webcam
            cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CVCAM":
            from donkeycar.parts.cv import CvCam
            cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "CSIC":
            from donkeycar.parts.camera import CSICamera
            cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
        elif cfg.CAMERA_TYPE == "V4L":
            from donkeycar.parts.camera import V4LCamera
            cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
        elif cfg.CAMERA_TYPE == "MOCK":
            from donkeycar.parts.camera import MockCamera
            cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
        elif cfg.CAMERA_TYPE == "IMAGE_LIST":
            from donkeycar.parts.camera import ImageListCamera
            cam = ImageListCamera(path_mask=cfg.PATH_MASK)
        elif cfg.CAMERA_TYPE == "LEOPARD":
            from donkeycar.parts.leopard_imaging import LICamera
            cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
        else:
            raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))

        # add lidar
        if cfg.USE_LIDAR:
            from donkeycar.parts.lidar import RPLidar
            if cfg.LIDAR_TYPE == 'RP':
                print("adding RP lidar part")
                lidar = RPLidar(lower_limit = cfg.LIDAR_LOWER_LIMIT, upper_limit = cfg.LIDAR_UPPER_LIMIT)
                V.add(lidar, inputs=[],outputs=['lidar/dist_array'], threaded=True)
            if cfg.LIDAR_TYPE == 'YD':
                print("YD Lidar not yet supported")

        # Donkey gym part will output position information if it is configured
        if cfg.DONKEY_GYM:
            if cfg.SIM_RECORD_LOCATION:
                outputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
            if cfg.SIM_RECORD_GYROACCEL:
                outputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
            if cfg.SIM_RECORD_VELOCITY:
                outputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
            
        V.add(cam, inputs=inputs, outputs=outputs, threaded=threaded)

    #This web controller will create a web server that is capable
    #of managing steering, throttle, and modes, and more.
    ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
    
    V.add(ctr,
        inputs=['cam/image_array', 'tub/num_records'],
        outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
        threaded=True)
        
    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        if cfg.CONTROLLER_TYPE == "MM1":
            from donkeycar.parts.robohat import RoboHATController            
            ctr = RoboHATController(cfg)
        elif "custom" == cfg.CONTROLLER_TYPE:
            #
            # custom controller created with `donkey createjs` command
            #
            from my_joystick import MyJoystickController
            ctr = MyJoystickController(
                throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
                throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
                steering_scale=cfg.JOYSTICK_STEERING_SCALE,
                auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
            ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
        else:
            from donkeycar.parts.controller import get_js_controller

            ctr = get_js_controller(cfg)

            if cfg.USE_NETWORKED_JS:
                from donkeycar.parts.controller import JoyStickSub
                netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
                V.add(netwkJs, threaded=True)
                ctr.js = netwkJs
        
        V.add(ctr, 
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #this throttle filter will allow one tap back for esc reverse
    th_filter = ThrottleFilter()
    V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    class PilotCondition:
        def run(self, mode):
            if mode == 'user':
                return False
            else:
                return True

    V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])

    class LedConditionLogic:
        def __init__(self, cfg):
            self.cfg = cfg

        def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
            #returns a blink rate. 0 for off. -1 for on. positive for rate.

            if track_loc is not None:
                led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
                return -1

            if model_file_changed:
                led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
                return 0.1
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if recording_alert:
                led.set_rgb(*recording_alert)
                return self.cfg.REC_COUNT_ALERT_BLINK_RATE
            else:
                led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)

            if behavior_state is not None and model_type == 'behavior':
                r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
                led.set_rgb(r, g, b)
                return -1 #solid on

            if recording:
                return -1 #solid on
            elif mode == 'user':
                return 1
            elif mode == 'local_angle':
                return 0.5
            elif mode == 'local':
                return 0.1
            return 0

    if cfg.HAVE_RGB_LED and not cfg.DONKEY_GYM:
        from donkeycar.parts.led_status import RGB_LED
        led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
        led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)

        V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
              outputs=['led/blink_rate'])

        V.add(led, inputs=['led/blink_rate'])

    def get_record_alert_color(num_records):
        col = (0, 0, 0)
        for count, color in cfg.RECORD_ALERT_COLOR_ARR:
            if num_records >= count:
                col = color
        return col

    class RecordTracker:
        def __init__(self):
            self.last_num_rec_print = 0
            self.dur_alert = 0
            self.force_alert = 0

        def run(self, num_records):
            if num_records is None:
                return 0

            if self.last_num_rec_print != num_records or self.force_alert:
                self.last_num_rec_print = num_records

                if num_records % 10 == 0:
                    print("recorded", num_records, "records")

                if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
                    self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
                    self.force_alert = 0

            if self.dur_alert > 0:
                self.dur_alert -= 1

            if self.dur_alert != 0:
                return get_record_alert_color(num_records)

            return 0

    rec_tracker_part = RecordTracker()
    V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
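    # 'records/alert' feeds LedConditionLogic above as its recording_alert input,
    # blinking the LED in the configured color as record-count milestones are reached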

    if cfg.AUTO_RECORD_ON_THROTTLE and isinstance(ctr, JoystickController):
        #then we are not using the circle button. hijack that to force a record count indication
        def show_record_count_status():
            rec_tracker_part.last_num_rec_print = 0
            rec_tracker_part.force_alert = 1
        ctr.set_button_down_trigger('circle', show_record_count_status)

    #Sombrero
    if cfg.HAVE_SOMBRERO:
        from donkeycar.parts.sombrero import Sombrero
        s = Sombrero()

    #IMU
    if cfg.HAVE_IMU:
        from donkeycar.parts.imu import IMU
        imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
        V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)

    # Use the FPV preview, which will show the cropped image output, or the full frame.
    if cfg.USE_FPV:
        V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)

    #Behavioral state
    if cfg.TRAIN_BEHAVIORS:
        bh = BehaviorPart(cfg.BEHAVIOR_LIST)
        V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
        try:
            ctr.set_button_down_trigger('L1', bh.increment_state)
        except:
            pass

        inputs = ['cam/image_array', "behavior/one_hot_state_array"]
    #Lidar
    elif cfg.USE_LIDAR:
        inputs = ['cam/image_array', 'lidar/dist_array']

    elif cfg.HAVE_ODOM:
        inputs = ['cam/image_array', 'enc/speed']

    elif model_type == "imu":
        assert(cfg.HAVE_IMU)
        #Run the pilot if the mode is not user.
        inputs=['cam/image_array',
            'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
    else:
        inputs=['cam/image_array']
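    # whichever branch ran above, 'inputs' is the list of channels the Keras
    # pilot part will consume when it is added further down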

    def load_model(kl, model_path):
        start = time.time()
        print('loading model', model_path)
        kl.load(model_path)
        print('finished loading in %s sec.' % (str(time.time() - start)) )

    def load_weights(kl, weights_path):
        start = time.time()
        try:
            print('loading model weights', weights_path)
            kl.model.load_weights(weights_path)
            print('finished loading in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print('ERR>> problems loading weights', weights_path)

    def load_model_json(kl, json_fnm):
        start = time.time()
        print('loading model json', json_fnm)
        from tensorflow.python import keras
        try:
            with open(json_fnm, 'r') as handle:
                contents = handle.read()
                kl.model = keras.models.model_from_json(contents)
            print('finished loading json in %s sec.' % (str(time.time() - start)) )
        except Exception as e:
            print(e)
            print("ERR>> problems loading model json", json_fnm)

    if model_path:
        #When we have a model, first create an appropriate Keras part
        kl = dk.utils.get_model_by_type(model_type, cfg)

        model_reload_cb = None

        if '.h5' in model_path or '.uff' in model_path or '.tflite' in model_path or '.pkl' in model_path:
            #when we have a full model file (.h5, .uff, .tflite or .pkl),
            #load everything from the model file
            load_model(kl, model_path)

            def reload_model(filename):
                load_model(kl, filename)

            model_reload_cb = reload_model

        elif '.json' in model_path:
            #when we have a .json extension
            #load the model from there and look for a matching
            #.wts file with just weights
            load_model_json(kl, model_path)
            weights_path = model_path.replace('.json', '.weights')
            load_weights(kl, weights_path)

            def reload_weights(filename):
                weights_path = filename.replace('.json', '.weights')
                load_weights(kl, weights_path)

            model_reload_cb = reload_weights

        else:
            print("ERR>> Unknown extension type on model file!!")
            return

        #this part will signal the visual LED, if connected
        V.add(FileWatcher(model_path, verbose=True), outputs=['modelfile/modified'])

        #these parts will reload the model file, but only when ai is running so we don't interrupt user driving
        V.add(FileWatcher(model_path), outputs=['modelfile/dirty'], run_condition="ai_running")
        V.add(DelayedTrigger(100), inputs=['modelfile/dirty'], outputs=['modelfile/reload'], run_condition="ai_running")
        V.add(TriggeredCallback(model_path, model_reload_cb), inputs=["modelfile/reload"], run_condition="ai_running")
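        # reload chain: FileWatcher flags a change to the model file, DelayedTrigger
        # waits a number of loops, then TriggeredCallback invokes model_reload_cb --
        # all gated on 'ai_running' so a reload never interrupts user driving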

        outputs=['pilot/angle', 'pilot/throttle']

        if cfg.TRAIN_LOCALIZER:
            outputs.append("pilot/loc")

        V.add(kl, inputs=inputs,
              outputs=outputs,
              run_condition='run_pilot')
    
    if cfg.STOP_SIGN_DETECTOR:
        from donkeycar.parts.object_detector.stop_sign_detector import StopSignDetector
        V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE, cfg.STOP_SIGN_SHOW_BOUNDING_BOX),
              inputs=['cam/image_array', 'pilot/throttle'],
              outputs=['pilot/throttle', 'cam/image_array'])

    #Choose what inputs should change the car.
    class DriveMode:
        def run(self, mode,
                    user_angle, user_throttle,
                    pilot_angle, pilot_throttle):
            if mode == 'user':
                return user_angle, user_throttle

            elif mode == 'local_angle':
                return pilot_angle if pilot_angle else 0.0, user_throttle

            else:
                return (pilot_angle if pilot_angle else 0.0,
                        pilot_throttle * cfg.AI_THROTTLE_MULT if pilot_throttle else 0.0)

    V.add(DriveMode(),
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])
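    # 'angle' and 'throttle' are the final control values consumed by the
    # drive train parts configured below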


    #to give the car a boost when starting ai mode in a race.
    aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)

    V.add(aiLauncher,
        inputs=['user/mode', 'throttle'],
        outputs=['throttle'])

    if isinstance(ctr, JoystickController):
        ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)


    class AiRunCondition:
        '''
        A bool part to let us know when ai is running.
        '''
        def run(self, mode):
            if mode == "user":
                return False
            return True

    V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])

    #Ai Recording
    class AiRecordingCondition:
        '''
        return True when in ai mode, otherwise respect the user mode recording flag
        '''
        def run(self, mode, recording):
            if mode == 'user':
                return recording
            return True

    if cfg.RECORD_DURING_AI:
        V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])

    #Drive train setup
    if cfg.DONKEY_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
        pass
    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_ESC":
        from donkeycar.parts.actuator import PCA9685, PWMSteering, PWMThrottle

        steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM,
                                        right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
        throttle = PWMThrottle(controller=throttle_controller,
                                        max_pulse=cfg.THROTTLE_FORWARD_PWM,
                                        zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                                        min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)
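        # PWMSteering/PWMThrottle map the normalized -1..1 'angle'/'throttle'
        # values onto the PWM pulse ranges configured above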


    elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM

        steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
        throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])


    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM

        left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
        right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
                inputs=['throttle', 'angle'],
                outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])

    elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N":
        from donkeycar.parts.actuator import TwoWheelSteeringThrottle, L298N_HBridge_DC_Motor

        left_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_LEFT_FWD, cfg.HBRIDGE_L298N_PIN_LEFT_BWD, cfg.HBRIDGE_L298N_PIN_LEFT_EN)
        right_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_RIGHT_FWD, cfg.HBRIDGE_L298N_PIN_RIGHT_BWD, cfg.HBRIDGE_L298N_PIN_RIGHT_EN)
        two_wheel_control = TwoWheelSteeringThrottle()

        V.add(two_wheel_control,
                inputs=['throttle', 'angle'],
                outputs=['left_motor_speed', 'right_motor_speed'])

        V.add(left_motor, inputs=['left_motor_speed'])
        V.add(right_motor, inputs=['right_motor_speed'])


    elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
        from donkeycar.parts.actuator import ServoBlaster, PWMSteering
        steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #the "channel" here is really a GPIO pin
        #PWM pulse values should be in the range of 100 to 200
        assert(cfg.STEERING_LEFT_PWM <= 200)
        assert(cfg.STEERING_RIGHT_PWM <= 200)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM,
                                        right_pulse=cfg.STEERING_RIGHT_PWM)


        from donkeycar.parts.actuator import Mini_HBridge_DC_Motor_PWM
        motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)

        V.add(steering, inputs=['angle'], threaded=True)
        V.add(motor, inputs=["throttle"])
        
    elif cfg.DRIVE_TRAIN_TYPE == "MM1":
        from donkeycar.parts.robohat import RoboHATDriver
        V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
    
    elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
        from donkeycar.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
        steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
        steering = PWMSteering(controller=steering_controller,
                                        left_pulse=cfg.STEERING_LEFT_PWM, 
                                        right_pulse=cfg.STEERING_RIGHT_PWM)
        
        throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
        throttle = PWMThrottle(controller=throttle_controller,
                                            max_pulse=cfg.THROTTLE_FORWARD_PWM,
                                            zero_pulse=cfg.THROTTLE_STOPPED_PWM, 
                                            min_pulse=cfg.THROTTLE_REVERSE_PWM)
        V.add(steering, inputs=['angle'], threaded=True)
        V.add(throttle, inputs=['throttle'], threaded=True)

    # OLED setup
    if cfg.USE_SSD1306_128_32:
        from donkeycar.parts.oled import OLEDPart
        auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
        oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_BUSNUM, auto_record_on_throttle=auto_record_on_throttle)
        V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)

    #add tub to save data

    if cfg.USE_LIDAR:
        inputs = ['cam/image_array', 'lidar/dist_array', 'user/angle', 'user/throttle', 'user/mode']
        types = ['image_array', 'nparray','float', 'float', 'str']
    else:
        inputs=['cam/image_array','user/angle', 'user/throttle', 'user/mode']
        types=['image_array','float', 'float','str']

    if cfg.HAVE_ODOM:
        inputs += ['enc/speed']
        types += ['float']

    if cfg.TRAIN_BEHAVIORS:
        inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
        types += ['int', 'str', 'vector']

    if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
        inputs += ['cam/depth_array']
        types += ['gray16_array']

    if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
        inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
            'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']

        types +=['float', 'float', 'float',
           'float', 'float', 'float']

    # rbx
    if cfg.DONKEY_GYM:
        if cfg.SIM_RECORD_LOCATION:  
            inputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
            types  += ['float', 'float', 'float', 'float', 'float']
        if cfg.SIM_RECORD_GYROACCEL: 
            inputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
            types  += ['float', 'float', 'float', 'float', 'float', 'float']
        if cfg.SIM_RECORD_VELOCITY:  
            inputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
            types  += ['float', 'float', 'float']

    if cfg.RECORD_DURING_AI:
        inputs += ['pilot/angle', 'pilot/throttle']
        types += ['float', 'float']

    if cfg.HAVE_PERFMON:
        from donkeycar.parts.perfmon import PerfMonitor
        mon = PerfMonitor(cfg)
        perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
        inputs += perfmon_outputs
        types += ['float', 'float', 'float']
        V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)

    # do we want to store new records in their own dir, or append to the existing one?
    tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
        cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
    tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
    V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
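    # records are only written while 'recording' is truthy (run_condition), and the
    # running count is published on 'tub/num_records' for the web UI and RecordTracker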

    # Telemetry (we publish the same metrics that the TubWriter records)
    if cfg.HAVE_MQTT_TELEMETRY:
        telem_inputs, _ = tel.add_step_inputs(inputs, types)
        V.add(tel, inputs=telem_inputs, outputs=["tub/queue_size"], threaded=True)

    if cfg.PUB_CAMERA_IMAGES:
        from donkeycar.parts.network import TCPServeValue
        from donkeycar.parts.image import ImgArrToJpg
        pub = TCPServeValue("camera")
        V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
        V.add(pub, inputs=['jpg/bin'])

    if type(ctr) is LocalWebController:
        if cfg.DONKEY_GYM:
            print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
        else:
            print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        ctr.set_tub(tub_writer.tub)
        ctr.print_controls()

    #run the vehicle loop at cfg.DRIVE_LOOP_HZ until cfg.MAX_LOOPS is reached
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
Example #30
0
def drive(cfg, model_path=None, model_wrapper=None, debug=False):
    """
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    """

    V = dk.vehicle.Vehicle()

    if not debug:
        from donkeycar.parts.camera import PiCamera
        cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
        V.add(cam, outputs=['cam/image_array'], threaded=True)

    else:
        print("Debug : ignoring camera.")
        def fake_cam():
            from PIL import Image
            import numpy as np
            dir_path = os.path.dirname(os.path.realpath(__file__))
            img = Image.open(os.path.join(dir_path, 'img.jpg'))
            a = np.array(img)
            return a

        fake_cam_part = Lambda(fake_cam)
        V.add(fake_cam_part, outputs=['cam/image_array'])

    ctr = LocalWebController(use_chaos=False)

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    # See if we should even run the pilot module.
    # This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'],
                                outputs=['run_pilot'])

    # Run the pilot if the mode is not user.
    if model_path:
        model = load_model(model_path)
        model_wrapper_class = getattr(model_wrappers, model_wrapper)
        model_instance = model_wrapper_class(model=model)

        V.add(model_instance, inputs=['cam/image_array'],
                              outputs=['pilot/angle1', 'pilot/throttle1'],
                              run_condition='run_pilot')

        def bound_throttle(pilot_angle1, pilot_throttle1):
            if pilot_angle1 is None:
                pilot_angle1 = 0
            if pilot_throttle1 is None:
                pilot_throttle1 = 0
            pilot_throttle = min(1, pilot_throttle1)
            pilot_throttle = max(0, pilot_throttle)
            return pilot_angle1, pilot_throttle

        bound_throttle_part = Lambda(bound_throttle)

        V.add(bound_throttle_part,
              inputs=['pilot/angle1', 'pilot/throttle1'],
              outputs=['pilot/angle', 'pilot/throttle'])
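        # the bounded part clamps the pilot throttle to the 0..1 range before
        # drive_mode below picks between user and pilot values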


    # add tub to save data

    set_path = os.path.join(ROOT_PATH, "set_{}".format(datetime.now().strftime("%Y%m%d%H%M%S")))
    os.makedirs(set_path)
    inputs = ['cam/image_array', 'user/angle', 'user/throttle']  # 'user/mode'
    rec = Recorder(path=set_path)
    V.add(rec, inputs=inputs, run_condition='recording')

    # Choose what inputs should change the car.
    def drive_mode(mode,
                   user_angle, user_throttle,
                   pilot_angle, pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=['user/mode', 'user/angle', 'user/throttle',
                  'pilot/angle', 'pilot/throttle'],
          outputs=['angle', 'throttle'])
    if not debug:
        steering_controller = PCA9685(cfg.STEERING_CHANNEL)
        steering = PWMSteering(controller=steering_controller,
                               left_pulse=cfg.STEERING_LEFT_PWM,
                               right_pulse=cfg.STEERING_RIGHT_PWM)

        throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
        throttle = PWMThrottle(controller=throttle_controller,
                               max_pulse=cfg.THROTTLE_FORWARD_PWM,
                               zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                               min_pulse=cfg.THROTTLE_REVERSE_PWM)

        V.add(steering, inputs=['angle'])
        V.add(throttle, inputs=['throttle'])

    else:
        print("Debug : ignoring controller part.")

    # run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ,
            max_loop_count=cfg.MAX_LOOPS)
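
The examples above all follow the same pattern: build a donkeycar Vehicle, add parts with named inputs and outputs, then start the loop. The following is a minimal sketch of that pattern, assuming only the donkeycar API already used in these examples (dk.vehicle.Vehicle, V.add, V.start); the FakeCamera and MeanBrightness parts, the 'cam/brightness' channel, and the literal loop settings are illustrative, not part of the library.

import numpy as np
import donkeycar as dk

class FakeCamera:
    """Toy stand-in for PiCamera: publish a static gray frame each loop."""
    def run(self):
        return np.full((120, 160, 3), 128, dtype=np.uint8)

class MeanBrightness:
    """Toy part: publish the mean pixel value of the incoming frame."""
    def run(self, img_arr):
        return float(img_arr.mean()) if img_arr is not None else 0.0

V = dk.vehicle.Vehicle()
V.add(FakeCamera(), outputs=['cam/image_array'])
V.add(MeanBrightness(), inputs=['cam/image_array'], outputs=['cam/brightness'])
V.start(rate_hz=20, max_loop_count=100)   # loop settings here are illustrative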