import argparse

import numpy as np
from sklearn.model_selection import train_test_split

from _loggers import Reader

ap = argparse.ArgumentParser()
ap.add_argument("-d",
                "--data",
                default='train-102k',
                type=str,
                help="name of the data to learn from (without .log)")
ap.add_argument("-e", "--epoch", default=80, type=int, help="number of epochs")
ap.add_argument("-b", "--batch-size", default=32, type=int, help="batch size")
args = vars(ap.parse_args())
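# Usage sketch (the script name below is an assumption, not given in the
# source):
#   python train.py --data train-102k --epoch 80 --batch-size 32
# Note that argparse stores "--batch-size" under the key "batch_size".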
DATA = args["data"]
ckptroot = './trained_models/'
# configuration zone
BATCH_SIZE = args["batch_size"]  # define the batch size
EPOCHS = args["epoch"]  # how many times we iterate through our data
STORAGE_LOCATION = "trained_models/"  # where we store our trained models
reader = Reader(f'./{DATA}.log')  # where our data lies
MODEL_NAME = "01_NVIDIA"

# read the observations & actions from the log
observations, actions = reader.read()
actions = np.array(actions)  # convert actions to a np array
observations = np.array(observations)  # convert observations to a np array

# Split the data: Train and Test
x_train, x_test, y_train, y_test = train_test_split(observations,
                                                    actions,
                                                    test_size=0.2,
                                                    random_state=2)
# Split the train data once more for validation data; drop the validation
# samples from the training set so the two don't overlap
val_size = int(len(x_train) * 0.1)
x_validate, y_validate = x_train[:val_size], y_train[:val_size]
x_train, y_train = x_train[val_size:], y_train[val_size:]
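# Sanity-check sketch: the 80/20 split plus the 10% validation carve-out
# should account for every sample exactly once.
assert len(x_validate) == val_size
assert len(x_train) + len(x_validate) + len(x_test) == len(observations)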
Example 2

import numpy as np

from _loggers import Reader
from model import TensorflowModel

# configuration zone
BATCH_SIZE = 78
EPOCHS = 20
TOP_CROP_VALUE = 17
loss_table = []
# observations are resized to 60x80; the top TOP_CROP_VALUE rows are cropped off
OBSERVATIONS_SHAPE = (None, 60 - TOP_CROP_VALUE, 80, 3)
#OBSERVATIONS_SHAPE = (None, 60, 80, 3)
ACTIONS_SHAPE = (None, 1)
SEED = 1234
STORAGE_LOCATION = "trained_models/behavioral_cloning"

reader = Reader('train.log')

observations, actions = reader.read()
actions = np.array(actions)
actions = actions[:, [1]]  # keep only the second action component, as an (N, 1) column
print(actions.shape)
observations = np.array(observations)
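# Note (a minimal demonstration, not part of the training flow): indexing
# with a list, actions[:, [1]], keeps the 2-D (N, 1) shape that
# ACTIONS_SHAPE = (None, 1) expects; plain actions[:, 1] would flatten to (N,).
_demo = np.array([[0.1, 0.2], [0.3, 0.4]])
assert _demo[:, [1]].shape == (2, 1)  # list index preserves the column axis
assert _demo[:, 1].shape == (2,)      # integer index drops it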

model = TensorflowModel(
    observation_shape=OBSERVATIONS_SHAPE,  # from the logs we've got
    action_shape=ACTIONS_SHAPE,  # same
    graph_location=STORAGE_LOCATION,  # where we want to store our trained models
    seed=SEED,  # to seed all random operations in the model (e.g., dropout)
)
Example 3
                help="batch size")
args = vars(ap.parse_args())
DATA = args["data"]

# configuration zone
BATCH_SIZE = args["batch_size"]  # define the batch size
EPOCHS = args["epoch"]  # how many times we iterate through our data
OBSERVATIONS_SHAPE = (
    None, 60, 120, 3
)  # here we assume the observations have been resized to 60x120
ACTIONS_SHAPE = (
    None, 2
)  # actions have a shape of 2: [leftWheelVelocity, rightWheelVelocity]
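# Illustration (values made up): a gentle left turn would command the left
# wheel slower than the right one, e.g. action = [0.3, 0.5].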
SEED = 1234
STORAGE_LOCATION = "trained_models/"  # where we store our trained models
reader = Reader(f'../{DATA}.log')  # where our data lies

observations, actions = reader.read()  # read the observations from data
actions = np.array(actions)
observations = np.array(observations)

model = TensorflowModel(
    observation_shape=OBSERVATIONS_SHAPE,  # from the logs we've got
    action_shape=ACTIONS_SHAPE,  # same
    graph_location=STORAGE_LOCATION,  # where we want to store our trained models
    seed=SEED,  # to seed all random operations in the model (e.g., dropout)
)

prev_loss = 10  # loss of the previous epoch, initialized high
# we train for EPOCHS epochs, with tqdm drawing a progress bar
epochs_bar = tqdm(range(EPOCHS))
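# Loop sketch (an assumption: the snippet cuts off here, and the
# model.train(...) / model.commit() calls below are hypothetical, not
# confirmed by the source). It illustrates the pattern implied by prev_loss:
# train one epoch, then checkpoint only when the loss improves.
for i in epochs_bar:
    loss = model.train(observations=observations,
                       actions=actions,
                       batch_size=BATCH_SIZE)  # hypothetical signature
    epochs_bar.set_postfix({'loss': loss})
    if loss < prev_loss:
        model.commit()  # hypothetical checkpoint call
        prev_loss = loss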