import gym

# `ChallengeInterfaceSolution` comes from the AIDO challenge framework; the exact import
# path may differ depending on your version of the duckietown-challenges package.
from duckietown_challenges import ChallengeInterfaceSolution


def solve(params, cis):
    # Python is dynamically typed; the assert below lets some IDEs (e.g. PyCharm)
    # offer autocompletion on `cis`.
    assert isinstance(cis, ChallengeInterfaceSolution)
    cis.info('Creating model.')
    # you have logging capabilities through the solution interface (cis);
    # the info you log can be retrieved from your submission files.

    # BEGIN SUBMISSION
    # if you have a model class with a predict function, these are likely the only lines you will need to modify
    from model import TensorflowModel
    # define observation and output shapes
    expect_shape = (480, 640, 3)  # shape of the camera images we receive (height, width, channels)
    model = TensorflowModel(
        observation_shape=(1,) + expect_shape,  # a batch of one image
        action_shape=(1, 1),  # we output a single value per step (the training example below keeps only one action column)
        graph_location='tf_models/'  # the folder where our trained models are stored
    )
    # END SUBMISSION
    try:

        # We get the environment from the Evaluation Engine
        cis.info('Making environment')
        env = gym.make(params['env'])
        # Then we make sure we have a connection with the environment and it is ready to go
        cis.info('Reset environment')
        observation = env.reset()
        check_valid_observations(observation)

        cis.info('Obtained first observations.')
        # While there is no signal of completion (simulation done),
        # we keep running predictions; the Evaluation Engine controls how long this goes on.
        while True:
            # we pass the observation to our model, and we get an action in return
            action = model.predict(observation)
            # we tell the environment to perform this action and we get some info back in OpenAI Gym style
            observation, reward, done, info = env.step(action)
            check_valid_observations(observation)
            # here you may want to compute some stats, like how much reward you are getting
            # note: this reward is not necessarily related to the challenge score.

            # it is important to check for this flag: the Evaluation Engine lets us know when we should finish.
            # If we ignore it, the Evaluation Engine will kill our container and this submission will get no score.
            if 'simulation_done' in info:
                cis.info('Received simulation_done.')
                break
            if done:
                cis.info('End of episode')
                observation = env.reset()

    finally:
        cis.info('Releasing CPU/GPU resources.')
        # release CPU/GPU resources, let's be friendly with other users that may need them
        model.close()

    cis.info("Graceful exit of solve().")
Example #2

import numpy as np
from tqdm import tqdm

from model import TensorflowModel
from _loggers import Reader  # log reader from the baseline utilities; adjust the import to your project layout

EPOCHS = 100  # example value; tune to your data
BATCH_SIZE = 32  # example value; tune to your hardware
OBSERVATIONS_SHAPE = (None, 480, 640, 3)  # shape of the logged camera images; adjust if you downscale
ACTIONS_SHAPE = (None, 1)
SEED = 1234
STORAGE_LOCATION = "trained_models/behavioral_cloning"

reader = Reader('train.log')

observations, actions = reader.read()
actions = np.array(actions)
actions = actions[:, [1]]  # keep only the second action column (a single output per step); the first column is discarded
print(actions.shape)
observations = np.array(observations)

model = TensorflowModel(
    observation_shape=OBSERVATIONS_SHAPE,  # from the logs we've got
    action_shape=ACTIONS_SHAPE,  # same
    graph_location=STORAGE_LOCATION,  # where we want to store our trained models
    seed=SEED  # to seed all random operations in the model (e.g., dropout)
)

min_loss = 1000  # best (lowest) training loss seen so far

# we train for EPOCHS epochs
epochs_bar = tqdm(range(EPOCHS))
for i in epochs_bar:
    loss = None
    # iterate over the dataset in mini-batches; BATCH_SIZE can be adjusted to your computing resources
    for batch in range(0, len(observations), BATCH_SIZE):
        loss = model.train(observations=observations[batch:batch + BATCH_SIZE],
                           actions=actions[batch:batch + BATCH_SIZE])
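    # NOTE (added sketch): the excerpt ends at the training call; a typical continuation
    # reports the epoch loss and periodically saves a checkpoint. `model.commit()` is an
    # assumed name for the save method -- use whatever your TensorflowModel exposes.
    epochs_bar.set_postfix({'loss': loss})
    if loss is not None and loss < min_loss:
        min_loss = loss
    if i % 10 == 0:
        model.commit()  # assumed checkpoint/save method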
Example #3
from gym_duckietown.envs import DuckietownEnv

from model import TensorflowModel

EPISODES = 10  # example value: number of evaluation episodes
STEPS = 500
OBSERVATIONS_SHAPE = (None, 480, 640, 3)  # must match the shapes used during training
ACTIONS_SHAPE = (None, 1)
SEED = 1234
STORAGE_LOCATION = "trained_models/behavioral_cloning"  # where the trained model was saved

env = DuckietownEnv(
    map_name='loop_empty',
    max_steps=EPISODES * STEPS,
    domain_rand=False,
    camera_width=640,
    camera_height=480,
    accept_start_angle_deg=4,  # start close to straight
    full_transparency=True,
)

model = TensorflowModel(
    observation_shape=OBSERVATIONS_SHAPE,  # from the logs we've got
    action_shape=ACTIONS_SHAPE,  # same
    graph_location=STORAGE_LOCATION,  # where our trained models are stored
    seed=SEED  # to seed all random operations in the model (e.g., dropout)
)

observation = env.reset()

# we can use the gym reward to get an idea of the performance of our model
cumulative_reward = 0.0

for episode in range(0, EPISODES):
    for steps in range(0, STEPS):
        action = model.predict(observation)
        #print('Action', action)
        #action = np.abs(action)
        observation, reward, done, info = env.step(action)
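        # NOTE (added sketch): the excerpt ends at the step call; a typical continuation
        # accumulates the gym reward and resets the environment when an episode terminates.
        cumulative_reward += reward
        if done:
            observation = env.reset()
            break
    print('Episode {}: cumulative reward so far = {}'.format(episode, cumulative_reward))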
Example #4
def export(self):
    # delegate to the base class to export the model as "synth.pb"
    TensorflowModel.export(self, "synth.pb")
Example #5
def __init__(self, num_nodes):
    # remember the node count and initialize the base model under the name "synth"
    self.num_nodes = num_nodes
    TensorflowModel.__init__(self, "synth")
Example #6
def __init__(self, num_nodes):
    # remember the node count and initialize the base model under the name "resonate"
    self.num_nodes = num_nodes
    TensorflowModel.__init__(self, "resonate")
Example #7
def export(self):
    # delegate to the base class to export the model as "resonate.pb"
    TensorflowModel.export(self, "resonate.pb")