Example #1
def get_model_by_type(model_type, cfg):
    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN, KerasLocalizer, KerasLatent
 
    if model_type is None:
        model_type = "categorical"

    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
    roi_crop = (cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)

    if model_type == "localizer" or cfg.TRAIN_LOCALIZER:
        kl = KerasLocalizer(num_outputs=2, num_behavior_inputs=len(cfg.BEHAVIOR_LIST), num_locations=cfg.NUM_LOCATIONS, input_shape=input_shape)
    elif model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
        kl = KerasBehavioral(num_outputs=2, num_behavior_inputs=len(cfg.BEHAVIOR_LIST), input_shape=input_shape)        
    elif model_type == "imu":
        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)        
    elif model_type == "linear":
        kl = KerasLinear(input_shape=input_shape, roi_crop=roi_crop)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "categorical":
        kl = KerasCategorical(input_shape=input_shape, throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE, roi_crop=roi_crop)
    elif model_type == "latent":
        kl = KerasLatent(input_shape=input_shape)
    else:
        raise Exception("unknown model type: %s" % model_type)

    return kl
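A quick usage sketch (not from the original project): SimpleNamespace stands in for donkeycar's config object, and every attribute value below is an illustrative assumption covering the fields the factory reads.

# Hypothetical smoke test for get_model_by_type; the cfg values are
# made-up stand-ins, not donkeycar defaults.
from types import SimpleNamespace

cfg = SimpleNamespace(
    IMAGE_H=120, IMAGE_W=160, IMAGE_DEPTH=3,        # camera frame shape
    ROI_CROP_TOP=0, ROI_CROP_BOTTOM=0,              # no cropping
    TRAIN_LOCALIZER=False, TRAIN_BEHAVIORS=False,   # plain driving model
    BEHAVIOR_LIST=['left_lane', 'right_lane'],
    NUM_LOCATIONS=10,
    SEQUENCE_LENGTH=3,
    MODEL_CATEGORICAL_MAX_THROTTLE_RANGE=0.5,
)

kl = get_model_by_type('linear', cfg)   # -> KerasLinear
kl = get_model_by_type(None, cfg)       # None falls back to "categorical"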
Example #2
File: utils.py Project: vivekchand/donkey
def get_model_by_type(model_type, cfg):
    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN

    if model_type is None:
        model_type = "categorical"

    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)

    if model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
        kl = KerasBehavioral(num_outputs=2,
                             num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                             input_shape=input_shape)
    elif model_type == "imu":
        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)
    elif model_type == "linear":
        kl = KerasLinear(input_shape=input_shape)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "rnn":
        kl = KerasRNN_LSTM(seq_length=cfg.SEQUENCE_LENGTH,
                           input_shape=input_shape)
    elif model_type == "categorical":
        kl = KerasCategorical(input_shape=input_shape)
    else:
        raise Exception("unknown model type: %s" % model_type)

    return kl
Example #3
File: utils.py Project: gis81576/donkeycar
def get_model_by_type(model_type, cfg):
    '''
    given the string model_type and the configuration settings in cfg
    create a Keras model and return it.
    '''
    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN, KerasLocalizer, KerasLatent
    from donkeycar.parts.tflite import TFLitePilot

    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    print("\"get_model_by_type\" model Type is: {}".format(model_type))

    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
    roi_crop = (cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)

    if model_type == "tflite_linear":
        kl = TFLitePilot()
    elif model_type == "localizer" or cfg.TRAIN_LOCALIZER:
        kl = KerasLocalizer(num_outputs=2,
                            num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                            num_locations=cfg.NUM_LOCATIONS,
                            input_shape=input_shape)
    elif model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
        kl = KerasBehavioral(num_outputs=2,
                             num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                             input_shape=input_shape)
    elif model_type == "imu":
        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)
    elif model_type == "linear":
        kl = KerasLinear(input_shape=input_shape, roi_crop=roi_crop)
    elif model_type == "tensorrt_linear":
        # Aggressively lazy load this. This module imports pycuda.autoinit which causes a lot of unexpected things
        # to happen when using TF-GPU for training.
        from donkeycar.parts.tensorrt import TensorRTLinear
        kl = TensorRTLinear(cfg=cfg)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W,
                           image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "categorical":
        kl = KerasCategorical(
            input_shape=input_shape,
            throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE,
            roi_crop=roi_crop)
    elif model_type == "latent":
        kl = KerasLatent(input_shape=input_shape)
    else:
        raise Exception("unknown model type: %s" % model_type)

    return kl
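The tensorrt_linear branch is the one design point worth calling out: the import is deferred into the branch because donkeycar.parts.tensorrt pulls in pycuda.autoinit, which grabs a CUDA context as a side effect of being imported and can break TF-GPU training. A minimal sketch of the same deferred-import pattern, reusing only imports that appear in the examples above:

def load_pilot(use_tensorrt, cfg):
    # keep modules with import-time side effects out of module scope;
    # the GPU is only touched if this branch actually runs
    if use_tensorrt:
        from donkeycar.parts.tensorrt import TensorRTLinear
        return TensorRTLinear(cfg=cfg)
    from donkeycar.parts.keras import KerasLinear
    return KerasLinear()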
Example #4
def sequence_train(cfg, tub_names, model_name, transfer_model, model_type, continuous, aug):
    '''
    use the specified data in tub_names to train an artificial neural network
    saves the output trained model as model_name
    trains models which take a sequence of images
    '''
    import glob
    import json
    import os

    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.utils import shuffle
    from donkeycar.parts.keras import KerasRNN_LSTM, Keras3D_CNN
    # gather_tubs, get_record_index, get_image_index, make_key, make_next_key,
    # load_scaled_image_arr and augment_image are module-level helpers of the
    # training script this snippet was taken from

    assert(not continuous)

    print("sequence of images training")

    if model_type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W,
            image_h=cfg.IMAGE_H,
            image_d=cfg.IMAGE_DEPTH,
            seq_length=cfg.SEQUENCE_LENGTH, num_outputs=2)

    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W,
            image_h=cfg.IMAGE_H,
            image_d=cfg.IMAGE_DEPTH,
            seq_length=cfg.SEQUENCE_LENGTH,
            num_outputs=2)
    else:
        raise Exception("unknown model type: %s" % model_type)

    tubs = gather_tubs(cfg, tub_names)

    records = []

    for tub in tubs:
        record_paths = glob.glob(os.path.join(tub.path, 'record_*.json'))
        print("Tub:", tub.path, "has", len(record_paths), 'records')

        record_paths.sort(key=get_record_index)
        records += record_paths

    print('collating records')
    gen_records = {}

    for record_path in records:

        with open(record_path, 'r') as fp:
            json_data = json.load(fp)

        basepath = os.path.dirname(record_path)
        image_filename = json_data["cam/image_array"]
        image_path = os.path.join(basepath, image_filename)
        sample = {'record_path': record_path,
                  'image_path': image_path,
                  'json_data': json_data}

        sample["tub_path"] = basepath
        sample["index"] = get_image_index(image_filename)

        angle = float(json_data['user/angle'])
        throttle = float(json_data["user/throttle"])

        sample['target_output'] = np.array([angle, throttle])

        sample['img_data'] = None

        key = make_key(sample)

        gen_records[key] = sample

    print('collating sequences')

    sequences = []

    for k, sample in gen_records.items():

        seq = []

        for i in range(cfg.SEQUENCE_LENGTH):
            key = make_next_key(sample, i)
            if key in gen_records:
                seq.append(gen_records[key])
            else:
                continue

        if len(seq) != cfg.SEQUENCE_LENGTH:
            continue

        sequences.append(seq)

    #shuffle and split the data
    train_data, val_data = train_test_split(sequences, shuffle=True,
                                            test_size=(1 - cfg.TRAIN_TEST_SPLIT))

    def generator(data, batch_size=cfg.BATCH_SIZE):
        num_records = len(data)

        while True:
            #shuffle again for good measure
            data = shuffle(data)

            for offset in range(0, num_records, batch_size):
                batch_data = data[offset:offset+batch_size]

                if len(batch_data) != batch_size:
                    break

                b_inputs_img = []
                b_labels = []

                for seq in batch_data:
                    inputs_img = []
                    labels = []
                    for record in seq:
                        #get image data if we don't already have it
                        if record['img_data'] is None:
                            img_arr = load_scaled_image_arr(record['image_path'], cfg)
                            if img_arr is None:
                                break
                            if aug:
                                img_arr = augment_image(img_arr)
                            if cfg.CACHE_IMAGES:
                                record['img_data'] = img_arr
                        else:
                            img_arr = record['img_data']

                        inputs_img.append(img_arr)

                    # drop the whole sequence if any image failed to load
                    if img_arr is None:
                        continue

                    labels.append(seq[-1]['target_output'])

                    b_inputs_img.append(inputs_img)
                    b_labels.append(labels)

                # reshape with -1 on the batch axis: a sequence may have been
                # dropped above, leaving fewer than batch_size entries
                X = [np.array(b_inputs_img).reshape(-1, cfg.SEQUENCE_LENGTH,
                                                    cfg.IMAGE_H, cfg.IMAGE_W,
                                                    cfg.IMAGE_DEPTH)]

                y = np.array(b_labels).reshape(-1, 2)

                yield X, y

    train_gen = generator(train_data)
    val_gen = generator(val_data)

    model_path = os.path.expanduser(model_name)

    total_records = len(sequences)
    total_train = len(train_data)
    total_val = len(val_data)

    print('train: %d, validation: %d' %(total_train, total_val))
    steps_per_epoch = total_train // cfg.BATCH_SIZE
    print('steps_per_epoch', steps_per_epoch)

    if steps_per_epoch < 2:
        raise Exception("Too little data to train. Please record more records.")

    kl.train(train_gen,
             val_gen,
             saved_model_path=model_path,
             steps=steps_per_epoch,
             train_split=cfg.TRAIN_TEST_SPLIT,
             use_early_stop=cfg.USE_EARLY_STOP)
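Example #4 (and Example #7 below) lean on helpers the snippets do not show: get_record_index, get_image_index, make_key and make_next_key. The sketches below are hedged reconstructions inferred purely from how the code above uses them; the real donkeycar training script may differ in detail.

import os

def get_record_index(path):
    # hypothetical: 'record_123.json' -> 123, used to sort records in time order
    name = os.path.basename(path)
    return int(name.split('_')[1].split('.')[0])

def get_image_index(fnm):
    # hypothetical: '123_cam-image_array_.jpg' -> 123 (frame index leads the name)
    return int(os.path.basename(fnm).split('_')[0])

def make_key(sample):
    # one dict key per frame: tub path concatenated with the frame index
    return sample['tub_path'] + str(sample['index'])

def make_next_key(sample, index_offset):
    # key of the frame index_offset steps after this sample, so consecutive
    # frames from the same tub can be chained into a sequence
    return sample['tub_path'] + str(sample['index'] + index_offset)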
Example #5
File: base.py Project: vivekchand/donkey
    def run(self, args):
        '''
        Start a websocket SocketIO server to talk to a donkey simulator
        '''
        import socketio
        from donkeycar.parts.simulation import SteeringServer
        from donkeycar.parts.keras import KerasCategorical, KerasLinear,\
            Keras3D_CNN, KerasRNN_LSTM

        args, parser = self.parse_args(args)

        cfg = load_config(args.config)

        if cfg is None:
            return

        #TODO: this logic should be in a pilot or model handler part.
        if args.type == "categorical":
            kl = KerasCategorical()
        elif args.type == "linear":
            kl = KerasLinear(num_outputs=2)
        elif args.type == "rnn":
            kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W,
                               image_h=cfg.IMAGE_H,
                               image_d=cfg.IMAGE_DEPTH,
                               seq_length=cfg.SEQUENCE_LENGTH,
                               num_outputs=2)
        elif args.type == "3d":
            kl = Keras3D_CNN(image_w=cfg.IMAGE_W,
                             image_h=cfg.IMAGE_H,
                             image_d=cfg.IMAGE_DEPTH,
                             seq_length=cfg.SEQUENCE_LENGTH,
                             num_outputs=2)
        else:
            print("didn't recognize type:", args.type)
            return

        #can provide an optional image filter part
        img_stack = None

        #load keras model
        kl.load(args.model)

        #start socket server framework
        sio = socketio.Server()

        top_speed = float(args.top_speed)

        #start sim server handler
        ss = SteeringServer(sio,
                            kpart=kl,
                            top_speed=top_speed,
                            image_part=img_stack)

        #register events and pass to server handlers

        @sio.on('telemetry')
        def telemetry(sid, data):
            ss.telemetry(sid, data)

        @sio.on('connect')
        def connect(sid, environ):
            ss.connect(sid, environ)

        ss.go(('0.0.0.0', 9090))
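One note on the event wiring: the @sio.on decorators above are registration sugar. python-socketio's Server.on also accepts a handler argument, so the same hookup can be written without decorators, passing the bound methods directly since their (sid, ...) signatures already match:

# equivalent registration without decorators
sio.on('telemetry', handler=ss.telemetry)
sio.on('connect', handler=ss.connect)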
Example #6
def drive(cfg, model_path=None, model_type='categorical', use_joystick=False):
    '''
    Construct a working robotic vehicle from many parts.
    Each part runs as a job in the Vehicle loop, calling either
    its run or run_threaded method depending on the constructor flag `threaded`.
    All parts are updated one after another at the framerate given in
    cfg.DRIVE_LOOP_HZ assuming each part finishes processing in a timely manner.
    Parts may have named outputs and inputs. The framework handles passing named outputs
    to parts requesting the same named input.
    '''

    #Initialize car
    V = dk.vehicle.Vehicle()
    cam = PiCamera(resolution=cfg.CAMERA_RESOLUTION)
    V.add(cam, outputs=['cam/image_array'], threaded=True)

    if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
        #modify max_throttle closer to 1.0 to have more power
        #modify steering_scale lower than 1.0 to have less responsive steering
        ctr = JoystickController(
            max_throttle=cfg.JOYSTICK_MAX_THROTTLE,
            steering_scale=cfg.JOYSTICK_STEERING_SCALE,
            auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
    else:
        #This web controller will create a web server that is capable
        #of managing steering, throttle, and modes, and more.
        ctr = LocalWebController()

    V.add(ctr,
          inputs=['cam/image_array'],
          outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
          threaded=True)

    #See if we should even run the pilot module.
    #This is only needed because the part run_condition only accepts boolean
    def pilot_condition(mode):
        if mode == 'user':
            return False
        else:
            return True

    pilot_condition_part = Lambda(pilot_condition)
    V.add(pilot_condition_part, inputs=['user/mode'], outputs=['run_pilot'])

    #Run the pilot if the mode is not user.
    #Change model type accordingly; categorical is the default.
    kl = KerasCategorical()
    if model_type == 'linear':
        kl = KerasLinear()
    elif model_type == 'hres_cat':
        kl = KerasHresCategorical()
    elif model_type == 'rnn':
        kl = KerasRNN_LSTM()
    elif model_type == 'rnn_bin':
        kl = KerasRNN_Categorical()
    if model_path:
        #kl.load(model_path)
        #kl = dk.utils.get_model_by_type(model_type, cfg)
        kl.load(model_path)

    V.add(kl,
          inputs=['cam/image_array'],
          outputs=['pilot/angle', 'pilot/throttle'],
          run_condition='run_pilot')

    #Choose what inputs should change the car.
    def drive_mode(mode, user_angle, user_throttle, pilot_angle,
                   pilot_throttle):
        if mode == 'user':
            return user_angle, user_throttle

        elif mode == 'local_angle':
            return pilot_angle, user_throttle

        else:
            return pilot_angle, pilot_throttle

    drive_mode_part = Lambda(drive_mode)
    V.add(drive_mode_part,
          inputs=[
              'user/mode', 'user/angle', 'user/throttle', 'pilot/angle',
              'pilot/throttle'
          ],
          outputs=['angle', 'throttle'])

    steering_controller = PCA9685(cfg.STEERING_CHANNEL)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    V.add(steering, inputs=['angle'])
    V.add(throttle, inputs=['throttle'])

    #add tub to save data
    inputs = [
        'cam/image_array', 'user/angle', 'user/throttle', 'user/mode',
        'pilot/angle', 'pilot/throttle'
    ]
    types = ['image_array', 'float', 'float', 'str', 'float', 'float']

    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types)
    V.add(tub, inputs=inputs, run_condition='recording')

    #run the vehicle
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)

    print("You can now go to <your pi ip address>:8887 to drive your car.")
Example #7
def sequence_train(cfg, tub_names, model_name, transfer_model, model_type,
                   continuous):
    '''
    use the specified data in tub_names to train an artificial neural network
    saves the output trained model as model_name
    trains models which take a sequence of images
    '''
    import sklearn
    import matplotlib.pyplot as plt
    from sklearn.model_selection import train_test_split
    from sklearn.utils import shuffle
    from PIL import Image
    import json

    assert (not continuous)
    print(' ============================== ' + model_type)
    print(" ==========================---- " + str(model_type == 'rnn'))
    print("sequence of images training")

    if model_type == "rnn":
        print(" ========================== here")
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W,
                           image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           seq_length=cfg.SEQUENCE_LENGTH,
                           num_outputs=2)
    if model_type == "rnn_bin":
        kl = KerasRNN_Categorical(image_w=cfg.IMAGE_W,
                                  image_h=cfg.IMAGE_H,
                                  image_d=cfg.IMAGE_DEPTH,
                                  seq_length=cfg.SEQUENCE_LENGTH,
                                  num_outputs=15)

    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W,
                         image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         seq_length=cfg.SEQUENCE_LENGTH,
                         num_outputs=2)

    tubs = gather_tubs(cfg, tub_names)

    records = []

    for tub in tubs:
        record_paths = glob.glob(os.path.join(tub.path, 'record_*.json'))
        print("Tub:", tub.path, "has", len(record_paths), 'records')

        record_paths.sort(key=get_record_index)
        records += record_paths

    print('collating records')
    gen_records = {}

    for record_path in records:

        with open(record_path, 'r') as fp:
            json_data = json.load(fp)

        basepath = os.path.dirname(record_path)
        image_filename = json_data["cam/image_array"]
        image_path = os.path.join(basepath, image_filename)
        sample = {
            'record_path': record_path,
            "image_path": image_path,
            "json_data": json_data
        }

        sample["tub_path"] = basepath
        sample["index"] = get_image_index(image_filename)

        angle = float(json_data['user/angle'])
        throttle = float(json_data["user/throttle"])
        if (model_type == "rnn_bin"):
            sample['target_output'] = dk.utils.linear_bin(angle)
        else:
            sample['target_output'] = np.array([angle, throttle])

        sample['img_data'] = None

        key = make_key(sample)

        gen_records[key] = sample

    print('collating sequences')

    sequences = []

    for k, sample in gen_records.items():

        seq = []

        for i in range(cfg.SEQUENCE_LENGTH):
            key = make_next_key(sample, i)
            if key in gen_records:
                seq.append(gen_records[key])
            else:
                continue

        if len(seq) != cfg.SEQUENCE_LENGTH:
            continue

        sequences.append(seq)

    #shuffle and split the data
    train_data, val_data = train_test_split(sequences,
                                            shuffle=True,
                                            test_size=(1 -
                                                       cfg.TRAIN_TEST_SPLIT))

    def generator(data, batch_size=cfg.BATCH_SIZE):
        num_records = len(data)

        while True:
            #shuffle again for good measure
            #(sklearn's shuffle returns a copy, so reassign the result)
            data = shuffle(data)

            for offset in range(0, num_records, batch_size):
                batch_data = data[offset:offset + batch_size]

                if len(batch_data) != batch_size:
                    break

                b_inputs_img = []
                b_labels = []

                for seq in batch_data:
                    inputs_img = []
                    labels = []
                    for record in seq:
                        #get image data if we don't already have it
                        if record['img_data'] is None:
                            img_arr = load_scaled_image_arr(
                                record['image_path'], cfg)
                            record['img_data'] = img_arr

                        inputs_img.append(record['img_data'])
                    labels.append(seq[-1]['target_output'])

                    b_inputs_img.append(inputs_img)
                    b_labels.append(labels)

                X = [np.array(b_inputs_img).reshape(batch_size,\
                    cfg.SEQUENCE_LENGTH, cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)]
                if (model_type == 'rnn_bin'):
                    y = np.array(b_labels).reshape(batch_size, 15)
                else:
                    y = np.array(b_labels).reshape(batch_size, 2)

                yield X, y

    train_gen = generator(train_data)
    val_gen = generator(val_data)

    model_path = os.path.expanduser(model_name)

    total_records = len(sequences)
    total_train = len(train_data)
    total_val = len(val_data)

    print('train: %d, validation: %d' % (total_train, total_val))
    steps_per_epoch = total_train // cfg.BATCH_SIZE
    print('steps_per_epoch', steps_per_epoch)

    history, save_best = kl.train(train_gen,
                                  val_gen,
                                  saved_model_path=model_path,
                                  steps=steps_per_epoch,
                                  train_split=cfg.TRAIN_TEST_SPLIT,
                                  use_early_stop=cfg.USE_EARLY_STOP)

    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss : %f' % save_best.best)
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.savefig(model_path + '_' + model_type +
                '_loss_%f.png' % save_best.best)