import pickle

import torch


def onehot(env):
    """Build a one-hot representation for every useable state, keyed by 1-D index."""
    name = 'onehot'
    dim = env.nstates

    oh_state_reps = {}
    for state in env.useable:
        idx = env.twoD2oneD(state)
        oh_state_reps[idx] = one_hot_state(env, idx)

    return oh_state_reps, name, dim, []
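
# `one_hot_state` is assumed to come from the project's utilities. A
# minimal sketch of the expected behaviour (a one-hot vector of length
# env.nstates for a given 1-D state index; hypothetical, not the
# project's actual implementation):
#
#   import numpy as np
#
#   def one_hot_state(env, state_idx):
#       vec = np.zeros(env.nstates)
#       vec[state_idx] = 1.0
#       return vec
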
# episodic memory with one cache slot per state; restore a previously
# saved episodic-control dictionary
memory = EpisodicMemory(env.action_space.n, cache_limit=env.nstates)
with open(data_dir + 'ec_dicts/dc126211-0af0-4fc1-8788-3f1b8567cdc2_EC.p',
          'rb') as f:
    memory.cache_list = pickle.load(f)
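
# quick sanity check on the restored cache (note: cache_list's exact
# structure depends on the EpisodicMemory implementation; len() works
# for either a dict or a list)
print(f'restored {len(memory.cache_list)} episodic cache entries')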

### place cell representations
# place_cells = PlaceCells(env.shape, input_dims, field_size=0.25)
# load pretrained place cells saved with the training results
with open(data_dir + f'results/{load_id}_data.p', 'rb') as f:
    place_cells = pickle.load(f)['place_cells']

# build per-state representations: one-hot vectors and place-cell activities
pc_state_reps = {}
oh_state_reps = {}
for state in env.useable:
    idx = env.twoD2oneD(state)
    oh_state_reps[idx] = one_hot_state(env, idx)
    pc_state_reps[idx] = place_cells.get_activities([state])[0]
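
# both representation dicts should cover exactly the useable states
assert set(oh_state_reps) == set(pc_state_reps)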

# visualize the place fields over the useable states
place_cells.plot_placefields(env_states_to_map=env.useable)

# alternative one-hot pipeline, kept for reference:
# oh_network = Network(input_dims=[input_dims], fc1_dims=200, fc2_dims=200, output_dims=env.action_space.n, lr=0.0005)
# oh_network = torch.load(data_dir + f'agents/{load_id}.pt')
# oh_agent = Agent(oh_network, state_representations=oh_state_reps)

# pc_network = Network(input_dims=[input_dims], fc1_dims=200, fc2_dims=200, output_dims=env.action_space.n, lr=0.0005)
pc_network = torch.load(data_dir + f'agents/{load_id}.pt')
pc_agent = Agent(pc_network,
                 state_representations=pc_state_reps,
                 memory=memory)
# route action selection through episodic control instead of the network policy
pc_agent.get_action = pc_agent.EC_action
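
# Hypothetical rollout sketch (assumes a gym-style step/reset API and
# 2-D state tuples, as used above; the actual Agent/env interface may
# differ):
#
#   state = env.reset()
#   done = False
#   while not done:
#       action = pc_agent.get_action(env.twoD2oneD(state))
#       state, reward, done, info = env.step(action)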