def get_model_by_type(model_type, cfg):
    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, \
        KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN

    if model_type is None:
        model_type = "categorical"

    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)

    if model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
        kl = KerasBehavioral(num_outputs=2,
                             num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                             input_shape=input_shape)
    elif model_type == "imu":
        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)
    elif model_type == "linear":
        kl = KerasLinear(input_shape=input_shape)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "rnn":
        kl = KerasRNN_LSTM(seq_length=cfg.SEQUENCE_LENGTH, input_shape=input_shape)
    elif model_type == "categorical":
        kl = KerasCategorical(input_shape=input_shape)
    else:
        raise Exception("unknown model type: %s" % model_type)

    return kl
def get_model_by_type(model_type, cfg):
    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, \
        KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN, \
        KerasLocalizer, KerasLatent

    if model_type is None:
        model_type = "categorical"

    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
    roi_crop = (cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)

    if model_type == "localizer" or cfg.TRAIN_LOCALIZER:
        kl = KerasLocalizer(num_outputs=2,
                            num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                            num_locations=cfg.NUM_LOCATIONS,
                            input_shape=input_shape)
    elif model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
        kl = KerasBehavioral(num_outputs=2,
                             num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                             input_shape=input_shape)
    elif model_type == "imu":
        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)
    elif model_type == "linear":
        kl = KerasLinear(input_shape=input_shape, roi_crop=roi_crop)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "categorical":
        kl = KerasCategorical(input_shape=input_shape,
                              throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE,
                              roi_crop=roi_crop)
    elif model_type == "latent":
        kl = KerasLatent(input_shape=input_shape)
    else:
        raise Exception("unknown model type: %s" % model_type)

    return kl
def get_model_by_type(model_type, cfg):
    '''
    given the string model_type and the configuration settings in cfg
    create a Keras model and return it.
    '''
    from donkeycar.parts.keras import KerasRNN_LSTM, KerasBehavioral, \
        KerasCategorical, KerasIMU, KerasLinear, Keras3D_CNN, \
        KerasLocalizer, KerasLatent
    from donkeycar.parts.tflite import TFLitePilot

    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    print("\"get_model_by_type\" model Type is: {}".format(model_type))

    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
    roi_crop = (cfg.ROI_CROP_TOP, cfg.ROI_CROP_BOTTOM)

    if model_type == "tflite_linear":
        kl = TFLitePilot()
    elif model_type == "localizer" or cfg.TRAIN_LOCALIZER:
        kl = KerasLocalizer(num_outputs=2,
                            num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                            num_locations=cfg.NUM_LOCATIONS,
                            input_shape=input_shape)
    elif model_type == "behavior" or cfg.TRAIN_BEHAVIORS:
        kl = KerasBehavioral(num_outputs=2,
                             num_behavior_inputs=len(cfg.BEHAVIOR_LIST),
                             input_shape=input_shape)
    elif model_type == "imu":
        kl = KerasIMU(num_outputs=2, num_imu_inputs=6, input_shape=input_shape)
    elif model_type == "linear":
        kl = KerasLinear(input_shape=input_shape, roi_crop=roi_crop)
    elif model_type == "tensorrt_linear":
        # Aggressively lazy load this. This module imports pycuda.autoinit which
        # causes a lot of unexpected things to happen when using TF-GPU for training.
        from donkeycar.parts.tensorrt import TensorRTLinear
        kl = TensorRTLinear(cfg=cfg)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH, seq_length=cfg.SEQUENCE_LENGTH)
    elif model_type == "categorical":
        kl = KerasCategorical(input_shape=input_shape,
                              throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE,
                              roi_crop=roi_crop)
    elif model_type == "latent":
        kl = KerasLatent(input_shape=input_shape)
    else:
        raise Exception("unknown model type: %s" % model_type)

    return kl
def sequence_train(cfg, tub_names, model_name, transfer_model, model_type, continuous, aug):
    '''
    use the specified data in tub_names to train an artificial neural network
    saves the output trained model as model_name
    trains models which take a sequence of images
    '''
    # Relies on module-level imports and helpers defined elsewhere in the training
    # script (glob, os, json, np, gather_tubs, get_record_index, get_image_index,
    # make_key, make_next_key, load_scaled_image_arr, augment_image,
    # train_test_split, shuffle).
    assert(not continuous)

    print("sequence of images training")

    if model_type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           seq_length=cfg.SEQUENCE_LENGTH, num_outputs=2)
    elif model_type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         seq_length=cfg.SEQUENCE_LENGTH, num_outputs=2)
    else:
        raise Exception("unknown model type: %s" % model_type)

    tubs = gather_tubs(cfg, tub_names)

    records = []

    for tub in tubs:
        record_paths = glob.glob(os.path.join(tub.path, 'record_*.json'))
        print("Tub:", tub.path, "has", len(record_paths), 'records')

        record_paths.sort(key=get_record_index)
        records += record_paths

    print('collating records')
    gen_records = {}

    for record_path in records:
        with open(record_path, 'r') as fp:
            json_data = json.load(fp)

        basepath = os.path.dirname(record_path)
        image_filename = json_data["cam/image_array"]
        image_path = os.path.join(basepath, image_filename)

        sample = {'record_path': record_path,
                  "image_path": image_path,
                  "json_data": json_data}
        sample["tub_path"] = basepath
        sample["index"] = get_image_index(image_filename)

        angle = float(json_data['user/angle'])
        throttle = float(json_data["user/throttle"])

        sample['target_output'] = np.array([angle, throttle])
        sample['img_data'] = None

        key = make_key(sample)
        gen_records[key] = sample

    print('collating sequences')

    sequences = []

    for k, sample in gen_records.items():
        seq = []

        for i in range(cfg.SEQUENCE_LENGTH):
            key = make_next_key(sample, i)
            if key in gen_records:
                seq.append(gen_records[key])
            else:
                continue

        if len(seq) != cfg.SEQUENCE_LENGTH:
            continue

        sequences.append(seq)

    #shuffle and split the data
    train_data, val_data = train_test_split(sequences, shuffle=True,
                                            test_size=(1 - cfg.TRAIN_TEST_SPLIT))

    def generator(data, batch_size=cfg.BATCH_SIZE):
        num_records = len(data)

        while True:
            #shuffle again for good measure
            data = shuffle(data)

            for offset in range(0, num_records, batch_size):
                batch_data = data[offset:offset + batch_size]

                if len(batch_data) != batch_size:
                    break

                b_inputs_img = []
                b_labels = []

                for seq in batch_data:
                    inputs_img = []
                    labels = []
                    for record in seq:
                        #get image data if we don't already have it
                        if record['img_data'] is None:
                            img_arr = load_scaled_image_arr(record['image_path'], cfg)

                            if img_arr is None:
                                break

                            if aug:
                                img_arr = augment_image(img_arr)

                            if cfg.CACHE_IMAGES:
                                record['img_data'] = img_arr
                        else:
                            img_arr = record['img_data']

                        inputs_img.append(img_arr)

                    if img_arr is None:
                        continue

                    labels.append(seq[-1]['target_output'])

                    b_inputs_img.append(inputs_img)
                    b_labels.append(labels)

                X = [np.array(b_inputs_img).reshape(batch_size,
                                                    cfg.SEQUENCE_LENGTH,
                                                    cfg.IMAGE_H,
                                                    cfg.IMAGE_W,
                                                    cfg.IMAGE_DEPTH)]
                y = np.array(b_labels).reshape(batch_size, 2)

                yield X, y

    train_gen = generator(train_data)
    val_gen = generator(val_data)

    model_path = os.path.expanduser(model_name)

    total_records = len(sequences)
    total_train = len(train_data)
    total_val = len(val_data)

    print('train: %d, validation: %d' % (total_train, total_val))
    steps_per_epoch = total_train // cfg.BATCH_SIZE
    print('steps_per_epoch', steps_per_epoch)

    if steps_per_epoch < 2:
        raise Exception("Too little data to train. Please record more records.")

    kl.train(train_gen, val_gen, saved_model_path=model_path,
             steps=steps_per_epoch, train_split=cfg.TRAIN_TEST_SPLIT,
             use_early_stop=cfg.USE_EARLY_STOP)
def run(self, args):
    '''
    Start a websocket SocketIO server to talk to a donkey simulator
    '''
    import socketio
    from donkeycar.parts.simulation import SteeringServer
    from donkeycar.parts.keras import KerasCategorical, KerasLinear, \
        Keras3D_CNN, KerasRNN_LSTM

    args, parser = self.parse_args(args)

    cfg = load_config(args.config)

    if cfg is None:
        return

    #TODO: this logic should be in a pilot or model handler part.
    if args.type == "categorical":
        kl = KerasCategorical()
    elif args.type == "linear":
        kl = KerasLinear(num_outputs=2)
    elif args.type == "rnn":
        kl = KerasRNN_LSTM(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                           image_d=cfg.IMAGE_DEPTH,
                           seq_length=cfg.SEQUENCE_LENGTH, num_outputs=2)
    elif args.type == "3d":
        kl = Keras3D_CNN(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H,
                         image_d=cfg.IMAGE_DEPTH,
                         seq_length=cfg.SEQUENCE_LENGTH, num_outputs=2)
    else:
        print("didn't recognize type:", args.type)
        return

    #can provide an optional image filter part
    img_stack = None

    #load keras model
    kl.load(args.model)

    #start socket server framework
    sio = socketio.Server()

    top_speed = float(args.top_speed)

    #start sim server handler
    ss = SteeringServer(sio, kpart=kl, top_speed=top_speed, image_part=img_stack)

    #register events and pass to server handlers
    @sio.on('telemetry')
    def telemetry(sid, data):
        ss.telemetry(sid, data)

    @sio.on('connect')
    def connect(sid, environ):
        ss.connect(sid, environ)

    ss.go(('0.0.0.0', 9090))
def get_model_by_type(model_type: str, cfg: 'Config') -> 'KerasPilot':
    '''
    given the string model_type and the configuration settings in cfg
    create a Keras model and return it.
    '''
    from donkeycar.parts.keras import KerasCategorical, KerasLinear, \
        KerasInferred, KerasIMU, KerasMemory, KerasBehavioral, KerasLocalizer, \
        KerasLSTM, Keras3D_CNN
    from donkeycar.parts.interpreter import KerasInterpreter, TfLite, TensorRT

    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    logger.info(f'get_model_by_type: model type is: {model_type}')
    input_shape = (cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)

    if 'tflite_' in model_type:
        interpreter = TfLite()
        used_model_type = model_type.replace('tflite_', '')
    elif 'tensorrt_' in model_type:
        interpreter = TensorRT()
        used_model_type = model_type.replace('tensorrt_', '')
    else:
        interpreter = KerasInterpreter()
        used_model_type = model_type

    used_model_type = EqMemorizedString(used_model_type)
    if used_model_type == "linear":
        kl = KerasLinear(interpreter=interpreter, input_shape=input_shape)
    elif used_model_type == "categorical":
        kl = KerasCategorical(
            interpreter=interpreter, input_shape=input_shape,
            throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE)
    elif used_model_type == 'inferred':
        kl = KerasInferred(interpreter=interpreter, input_shape=input_shape)
    elif used_model_type == "imu":
        kl = KerasIMU(interpreter=interpreter, input_shape=input_shape)
    elif used_model_type == "memory":
        mem_length = getattr(cfg, 'SEQUENCE_LENGTH', 3)
        mem_depth = getattr(cfg, 'MEM_DEPTH', 0)
        kl = KerasMemory(interpreter=interpreter, input_shape=input_shape,
                         mem_length=mem_length, mem_depth=mem_depth)
    elif used_model_type == "behavior":
        kl = KerasBehavioral(
            interpreter=interpreter, input_shape=input_shape,
            throttle_range=cfg.MODEL_CATEGORICAL_MAX_THROTTLE_RANGE,
            num_behavior_inputs=len(cfg.BEHAVIOR_LIST))
    elif used_model_type == 'localizer':
        kl = KerasLocalizer(interpreter=interpreter, input_shape=input_shape,
                            num_locations=cfg.NUM_LOCATIONS)
    elif used_model_type == 'rnn':
        kl = KerasLSTM(interpreter=interpreter, input_shape=input_shape,
                       seq_length=cfg.SEQUENCE_LENGTH)
    elif used_model_type == '3d':
        kl = Keras3D_CNN(interpreter=interpreter, input_shape=input_shape,
                         seq_length=cfg.SEQUENCE_LENGTH)
    else:
        known = [k + u for k in ('', 'tflite_', 'tensorrt_')
                 for u in used_model_type.mem]
        raise ValueError(f"Unknown model type {model_type}, supported types are"
                         f" {', '.join(known)}")
    return kl
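# Minimal usage sketch (not part of the original source): assumes a standard
# donkeycar config.py and an already-trained model file; the file names are
# illustrative only. get_model_by_type selects the pilot class and the matching
# interpreter (Keras, TfLite or TensorRT), after which kl.load() restores the
# weights, as in the simulator command above.
if __name__ == '__main__':
    import donkeycar as dk
    cfg = dk.load_config('config.py')
    kl = get_model_by_type('tflite_linear', cfg)
    kl.load('models/pilot.tflite')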