def train(cfg: Config, tub_paths: str, model: str, model_type: str) \
        -> tf.keras.callbacks.History:
    """
    Train the model
    """
    model_name, model_ext = os.path.splitext(model)
    is_tflite = model_ext == '.tflite'
    if is_tflite:
        model = f'{model_name}.h5'

    if not model_type:
        model_type = cfg.DEFAULT_MODEL_TYPE

    tubs = tub_paths.split(',')
    all_tub_paths = [os.path.expanduser(tub) for tub in tubs]
    output_path = os.path.expanduser(model)
    train_type = 'linear' if 'linear' in model_type else model_type

    kl = get_model_by_type(train_type, cfg)
    if cfg.PRINT_MODEL_SUMMARY:
        print(kl.model.summary())

    dataset = TubDataset(cfg, all_tub_paths)
    training_records, validation_records = dataset.train_test_split()
    print(f'Records # Training {len(training_records)}')
    print(f'Records # Validation {len(validation_records)}')

    training_pipe = BatchSequence(kl, cfg, training_records, is_train=True)
    validation_pipe = BatchSequence(kl, cfg, validation_records,
                                    is_train=False)

    dataset_train = training_pipe.create_tf_data().prefetch(
        tf.data.experimental.AUTOTUNE)
    dataset_validate = validation_pipe.create_tf_data().prefetch(
        tf.data.experimental.AUTOTUNE)
    train_size = len(training_pipe)
    val_size = len(validation_pipe)

    assert val_size > 0, "Not enough validation data, decrease the batch " \
                         "size or add more data."

    history = kl.train(model_path=output_path,
                       train_data=dataset_train,
                       train_steps=train_size,
                       batch_size=cfg.BATCH_SIZE,
                       validation_data=dataset_validate,
                       validation_steps=val_size,
                       epochs=cfg.MAX_EPOCHS,
                       verbose=cfg.VERBOSE_TRAIN,
                       min_delta=cfg.MIN_DELTA,
                       patience=cfg.EARLY_STOP_PATIENCE)

    if is_tflite:
        tf_lite_model_path = f'{os.path.splitext(output_path)[0]}.tflite'
        keras_model_to_tflite(output_path, tf_lite_model_path)

    return history
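
# A minimal usage sketch for the train() function above. It assumes the usual
# donkeycar setup (a car directory with config.py and recorded tubs); the
# paths below are hypothetical, and dk.load_config is assumed to be the
# standard donkeycar config loader.
import donkeycar as dk

cfg = dk.load_config(config_path='~/mycar/config.py')
history = train(cfg,
                tub_paths='~/mycar/data/tub_1,~/mycar/data/tub_2',
                # a '.tflite' extension makes train() also export a TF Lite
                # model after training the intermediate .h5
                model='~/mycar/models/pilot.tflite',
                model_type='linear')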
def test_training_pipeline(config: Config, model_type: str,
                           train_filter: Callable[[TubRecord], bool]) -> None:
    """
    Test consistency of the model interfaces and data used in the training
    pipeline.

    :param config:          donkey config
    :param model_type:      test specification of model type
    :param train_filter:    filter for records
    :return:                None
    """
    kl = get_model_by_type(model_type, config)
    tub_dir = config.DATA_PATH_ALL if model_type in full_tub else \
        config.DATA_PATH
    # don't shuffle so we can identify data for testing
    config.TRAIN_FILTER = train_filter
    dataset = TubDataset(config, [tub_dir], seq_size=kl.seq_size())
    training_records, validation_records = \
        train_test_split(dataset.get_records(), shuffle=False,
                         test_size=(1. - config.TRAIN_TEST_SPLIT))
    seq = BatchSequence(kl, config, training_records, True)
    data_train = seq.create_tf_data()
    num_whole_batches = len(training_records) // config.BATCH_SIZE
    # this collects all batches into one list
    tf_batch = list(data_train.take(num_whole_batches).as_numpy_iterator())
    it = iter(training_records)
    for xy_batch in tf_batch:
        # extract x and y values from records; asymmetric in x and y b/c x
        # requires image manipulations
        batch_records = [next(it) for _ in range(config.BATCH_SIZE)]
        records_x = [kl.x_translate(
            kl.x_transform_and_process(r, normalize_image))
            for r in batch_records]
        records_y = [kl.y_translate(kl.y_transform(r))
                     for r in batch_records]
        # from here all checks are symmetrical between x and y
        for batch, o_type, records \
                in zip(xy_batch, kl.output_types(), (records_x, records_y)):
            # check batch dictionaries have the expected keys
            assert batch.keys() == o_type.keys(), \
                'batch keys need to match models output types'
            # convert record values into arrays of batch size
            values = defaultdict(list)
            for r in records:
                for k, v in r.items():
                    values[k].append(v)
            # now convert lists of floats or numpy arrays into numpy arrays
            np_dict = dict()
            for k, v in values.items():
                np_dict[k] = np.array(v)
            # compare record values with values from tf.data
            for k, v in batch.items():
                assert np.isclose(v, np_dict[k]).all()
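
# A hypothetical example of the train_filter argument for the test above: a
# predicate over TubRecord that keeps only forward-driving records. The
# 'user/throttle' key and the .underlying dict are the usual donkeycar tub
# fields, but treat both as assumptions if your tub schema differs.
def forward_only(record: TubRecord) -> bool:
    # keep only records with positive throttle
    return record.underlying['user/throttle'] > 0.0

# e.g. test_training_pipeline(config, 'linear', forward_only)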
def train(cfg: Config, tub_paths: str, model: str = None,
          model_type: str = None, transfer: str = None, comment: str = None) \
        -> tf.keras.callbacks.History:
    """
    Train the model
    """
    database = PilotDatabase(cfg)
    model_name, model_num, train_type, is_tflite = \
        get_model_train_details(cfg, database, model, model_type)

    output_path = os.path.join(cfg.MODELS_PATH, model_name + '.h5')
    kl = get_model_by_type(train_type, cfg)
    if transfer:
        kl.load(transfer)
    if cfg.PRINT_MODEL_SUMMARY:
        print(kl.model.summary())

    tubs = tub_paths.split(',')
    all_tub_paths = [os.path.expanduser(tub) for tub in tubs]
    dataset = TubDataset(cfg, all_tub_paths)
    training_records, validation_records = dataset.train_test_split()
    print(f'Records # Training {len(training_records)}')
    print(f'Records # Validation {len(validation_records)}')

    training_pipe = BatchSequence(kl, cfg, training_records, is_train=True)
    validation_pipe = BatchSequence(kl, cfg, validation_records,
                                    is_train=False)

    dataset_train = training_pipe.create_tf_data().prefetch(
        tf.data.experimental.AUTOTUNE)
    dataset_validate = validation_pipe.create_tf_data().prefetch(
        tf.data.experimental.AUTOTUNE)
    train_size = len(training_pipe)
    val_size = len(validation_pipe)

    assert val_size > 0, "Not enough validation data, decrease the batch " \
                         "size or add more data."

    history = kl.train(model_path=output_path,
                       train_data=dataset_train,
                       train_steps=train_size,
                       batch_size=cfg.BATCH_SIZE,
                       validation_data=dataset_validate,
                       validation_steps=val_size,
                       epochs=cfg.MAX_EPOCHS,
                       verbose=cfg.VERBOSE_TRAIN,
                       min_delta=cfg.MIN_DELTA,
                       patience=cfg.EARLY_STOP_PATIENCE,
                       show_plot=cfg.SHOW_PLOT)

    if is_tflite:
        tf_lite_model_path = f'{os.path.splitext(output_path)[0]}.tflite'
        keras_model_to_tflite(output_path, tf_lite_model_path)

    database_entry = {
        'Number': model_num,
        'Name': model_name,
        'Type': str(kl),
        'Tubs': tub_paths,
        'Time': time(),
        'History': history.history,
        'Transfer': os.path.basename(transfer) if transfer else None,
        'Comment': comment,
        'Config': str(cfg)
    }
    database.add_entry(database_entry)
    database.write()

    return history
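
# A hedged sketch of transfer learning with the train() variant above:
# passing an existing pilot via 'transfer' loads its weights into the fresh
# model before training, and 'comment' ends up in the pilot database entry.
# All paths below are hypothetical.
cfg = dk.load_config(config_path='~/mycar/config.py')
history = train(cfg,
                tub_paths='~/mycar/data',
                model='pilot_22.h5',
                model_type='linear',
                transfer='~/mycar/models/pilot_21.h5',
                comment='fine-tuned on new track data')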
def plot_predictions(self, cfg, tub_paths, model_path, start, limit,
                     model_type):
    """
    Plot model predictions for angle and throttle against data from tubs.
    """
    import matplotlib.pyplot as plt
    import pandas as pd
    from pathlib import Path

    model_path = os.path.expanduser(model_path)
    model = dk.utils.get_model_by_type(model_type, cfg)
    # This just gets us the text for the plot title:
    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    model.load(model_path)

    user_angles = []
    user_throttles = []
    pilot_angles = []
    pilot_throttles = []

    base_path = Path(os.path.expanduser(tub_paths)).absolute().as_posix()
    dataset = TubDataset(config=cfg, tub_paths=[base_path],
                         seq_size=model.seq_size())
    records = dataset.get_records()
    num_records = len(records)
    if start > num_records:
        start = max(num_records - 1000, 0)
        limit = 1000
    if start + limit > num_records:
        limit = num_records - start
    records = records[start:start + limit]
    bar = IncrementalBar('Inferencing', max=len(records))

    for tub_record in records:
        inputs = model.x_transform_and_process(
            tub_record, lambda x: normalize_image(x))
        input_dict = model.x_translate(inputs)
        pilot_angle, pilot_throttle = \
            model.inference_from_dict(input_dict)
        user_angle, user_throttle = model.y_transform(tub_record)
        user_angles.append(user_angle)
        user_throttles.append(user_throttle)
        pilot_angles.append(pilot_angle)
        pilot_throttles.append(pilot_throttle)
        bar.next()

    angles_df = pd.DataFrame({'user_angle': user_angles,
                              'pilot_angle': pilot_angles})
    throttles_df = pd.DataFrame({'user_throttle': user_throttles,
                                 'pilot_throttle': pilot_throttles})

    fig = plt.figure()
    title = f"Model Predictions\nTubs: {tub_paths}\nModel: {model_path}\n" \
            f"Type: {model_type}"
    fig.suptitle(title)

    # Pad each frame with NaN rows and shift by 'start', so the plot's
    # x-axis lines up with the record index; shifting technique from
    # https://stackoverflow.com/questions/10982089/how-to-shift-a-column-in-pandas-dataframe
    nan_rows = pd.DataFrame([[np.nan] * len(angles_df.columns)] * start,
                            columns=angles_df.columns)
    angles_df = pd.concat([angles_df, nan_rows], ignore_index=True)
    angles_df = angles_df.shift(periods=start)
    nan_rows = pd.DataFrame([[np.nan] * len(throttles_df.columns)] * start,
                            columns=throttles_df.columns)
    throttles_df = pd.concat([throttles_df, nan_rows], ignore_index=True)
    throttles_df = throttles_df.shift(periods=start)

    ax1 = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    angles_df.plot(ax=ax1)
    throttles_df.plot(ax=ax2)
    ax1.legend(loc=4)
    ax2.legend(loc=4)
    plt.savefig(f'{model_path}_pred_{start}_{start + limit}.png')
    logger.info(
        f'Saving prediction plot to {model_path}_pred_{start}_{start + limit}.png')
    plt.show()
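
# A hedged usage sketch for plot_predictions() above. The 'self' parameter
# suggests it is a method on a CLI command class; ShowPredictionPlots is an
# assumed host class name, and all paths are hypothetical. This would render
# predictions for the first 500 records of a tub.
cfg = dk.load_config(config_path='~/mycar/config.py')
cmd = ShowPredictionPlots()   # assumed host class of plot_predictions
cmd.plot_predictions(cfg,
                     tub_paths='~/mycar/data',
                     model_path='~/mycar/models/pilot.h5',
                     start=0,
                     limit=500,
                     model_type='linear')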
def train(cfg: Config, tub_paths: str, model: str = None,
          model_type: str = None, transfer: str = None, comment: str = None) \
        -> tf.keras.callbacks.History:
    """
    Train the model
    """
    database = PilotDatabase(cfg)
    if model_type is None:
        model_type = cfg.DEFAULT_MODEL_TYPE
    model_path, model_num = \
        get_model_train_details(database, model)

    base_path = os.path.splitext(model_path)[0]
    kl = get_model_by_type(model_type, cfg)
    if transfer:
        kl.load(transfer)
    if cfg.PRINT_MODEL_SUMMARY:
        print(kl.interpreter.model.summary())

    tubs = tub_paths.split(',')
    all_tub_paths = [os.path.expanduser(tub) for tub in tubs]
    dataset = TubDataset(config=cfg, tub_paths=all_tub_paths,
                         seq_size=kl.seq_size())
    training_records, validation_records \
        = train_test_split(dataset.get_records(), shuffle=True,
                           test_size=(1. - cfg.TRAIN_TEST_SPLIT))
    print(f'Records # Training {len(training_records)}')
    print(f'Records # Validation {len(validation_records)}')

    # We need augmentation in validation when using crop / trapeze
    training_pipe = BatchSequence(kl, cfg, training_records, is_train=True)
    validation_pipe = BatchSequence(kl, cfg, validation_records,
                                    is_train=False)
    tune = tf.data.experimental.AUTOTUNE
    dataset_train = training_pipe.create_tf_data().prefetch(tune)
    dataset_validate = validation_pipe.create_tf_data().prefetch(tune)
    train_size = len(training_pipe)
    val_size = len(validation_pipe)

    # Optional caps on the number of training/validation steps; large
    # validation datasets have been observed to cause memory leaks.
    train_limit = cfg.TRAIN_LIMIT
    train_len = len(training_records)
    if train_limit is not None and train_len > train_limit:
        train_decrease = train_limit / train_len
        _train_size = math.ceil(train_size * train_decrease)
        print(f'train steps decreased from {train_size} to {_train_size}')
        train_size = _train_size

    val_limit = cfg.VALIDATION_LIMIT
    val_len = len(validation_records)
    if val_limit is not None and val_len > val_limit:
        val_decrease = val_limit / val_len
        _val_size = math.ceil(val_size * val_decrease)
        print(f'val steps decreased from {val_size} to {_val_size}')
        val_size = _val_size

    assert val_size > 0, "Not enough validation data, decrease the batch " \
                         "size or add more data."

    history = kl.train(model_path=model_path,
                       train_data=dataset_train,
                       train_steps=train_size,
                       batch_size=cfg.BATCH_SIZE,
                       validation_data=dataset_validate,
                       validation_steps=val_size,
                       epochs=cfg.MAX_EPOCHS,
                       verbose=cfg.VERBOSE_TRAIN,
                       min_delta=cfg.MIN_DELTA,
                       use_early_stop=cfg.USE_EARLY_STOP,
                       patience=cfg.EARLY_STOP_PATIENCE,
                       show_plot=cfg.SHOW_PLOT)

    if getattr(cfg, 'CREATE_TF_LITE', True):
        tf_lite_model_path = f'{base_path}.tflite'
        keras_model_to_tflite(model_path, tf_lite_model_path)

    if getattr(cfg, 'CREATE_TENSOR_RT', False):
        # load the h5 (i.e. keras) model
        model_rt = load_model(model_path)
        # save in the tensorflow savedmodel format (i.e. a directory)
        model_rt.save(f'{base_path}.savedmodel')
        # pass the savedmodel to the rt converter
        saved_model_to_tensor_rt(f'{base_path}.savedmodel',
                                 f'{base_path}.trt')

    database_entry = {
        'Number': model_num,
        'Name': os.path.basename(base_path),
        'Type': str(kl),
        'Tubs': tub_paths,
        'Time': time(),
        'History': history.history,
        'Transfer': os.path.basename(transfer) if transfer else None,
        'Comment': comment,
        'Config': str(cfg)
    }
    database.add_entry(database_entry)
    database.write()
    return history
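
# A hedged sketch of the config entries the train() variant above reads,
# as they might appear in a car directory's myconfig.py. The default values
# shown are illustrative assumptions, not authoritative.
TRAIN_LIMIT = None          # cap on training records used (None = no cap)
VALIDATION_LIMIT = 5000     # cap on validation records, to bound memory use
CREATE_TF_LITE = True       # also export a .tflite next to the .h5
CREATE_TENSOR_RT = False    # also export a TensorRT .trt via a savedmodel
USE_EARLY_STOP = True       # stop when validation loss stops improving
EARLY_STOP_PATIENCE = 5     # epochs without improvement before stopping
MIN_DELTA = .0005           # minimum loss change that counts as improvement
SHOW_PLOT = True            # show the loss plot after training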