Example #1
    def network_simulation(self, networkpath):
        # load model
        torch.set_default_dtype(torch.float64)
        state_dict = torch.load(networkpath)
        #hidden_layer_size = state_dict["lstm.weight_hh_l0"][1]
        model = LSTM_multi_modal() if output_model == "multi_modal" else LSTM_fixed()
        model.load_state_dict(state_dict)
        model.eval()

        # init hidden
        #hidden_state = [model.init_hidden(1, num_layers) for agent in range(self.num_guppys)]
        states = [[model.init_hidden(1, 1, hidden_layer_size)
                  for i in range(num_layers * 2)]
                  for j in range(self.num_guppys)]
        for i in range(1, len(self.agent_data) - 1):
            for agent in range(self.num_guppys):
                with torch.no_grad():
                    # get input data for this frame
                    sensory = self.craft_vector(i, agent)
                    data = torch.from_numpy(numpy.concatenate((self.loc_vec, sensory)))
                    data = data.view(1, 1, -1)

                    # predict the new ang_turn, lin_speed
                    #out, hidden_state[agent] = model.predict(data, hidden_state[agent])
                    out, states[agent] = model.predict(data, states[agent])
                    ang_turn = out[0].item() if output_model == "multi_modal" else out[0][0][0].item()
                    lin_speed = out[1].item() if output_model == "multi_modal" else out[0][0][1].item()

                    # rotate agent position by angle calculated by network
                    cos_a = cos(ang_turn)
                    sin_a = sin(ang_turn)
                    agent_pos = self.data[agent][i][0], self.data[agent][i][1]
                    agent_ori = self.data[agent][i][2], self.data[agent][i][3]
                    new_ori = [cos_a * agent_ori[0] - sin_a * agent_ori[1], \
                               sin_a * agent_ori[0] + cos_a * agent_ori[1]]
                    # normally the rotation of a normalized vector by a normalized vector should again be a
                    # normalized vector, but it seems there are some numerical errors, so normalize the orientation
                    # again
                    normalize_ori(new_ori)

                    # multiply new orientation by linear speed and add to old position
                    translation_vec = scalar_mul(lin_speed, new_ori)
                    new_pos = vec_add(agent_pos, translation_vec)
                    # network does not learn the tank walls properly sometimes, let fish bump against the wall
                    normalize_pos(new_pos)

                    # update the position for the next timestep
                    self.data[agent][i + 1][0], self.data[agent][i + 1][1] = new_pos
                    self.data[agent][i + 1][2], self.data[agent][i + 1][3] = new_ori

            self.plot_guppy_bins(bins=False)
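The vector helpers called above (normalize_ori, scalar_mul, vec_add, normalize_pos) are defined elsewhere in the project; the following is a minimal sketch of what they presumably do, assuming 2-element position/orientation sequences and hypothetical tank bounds.

from math import hypot

# Hypothetical tank bounds; the real limits depend on the recorded arena.
X_MIN, X_MAX = 0.0, 1.0
Y_MIN, Y_MAX = 0.0, 1.0

def normalize_ori(ori):
    # rescale the 2D orientation vector to unit length, in place
    length = hypot(ori[0], ori[1])
    if length > 0:
        ori[0] /= length
        ori[1] /= length

def scalar_mul(s, vec):
    # multiply a 2D vector by a scalar
    return [s * vec[0], s * vec[1]]

def vec_add(a, b):
    # component-wise sum of two 2D vectors
    return [a[0] + b[0], a[1] + b[1]]

def normalize_pos(pos):
    # clamp the position to the tank walls so the agent bumps against them
    pos[0] = min(max(pos[0], X_MIN), X_MAX)
    pos[1] = min(max(pos[1], Y_MIN), Y_MAX)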
Example #2
files = [
    join(trainpath, f) for f in listdir(trainpath)
    if isfile(join(trainpath, f)) and f.endswith(".hdf5")
]
files.sort()
num_files = len(files) // 8
files = files[-18:]
print(files)

torch.set_default_dtype(torch.float64)

# for the fixed output model we use regression: just predict the absolute values of linear speed
# and angular turn and train with squared-error (MSE) loss; the multi-modal model is trained
# with cross-entropy instead

if output_model == "multi_modal":
    model = LSTM_multi_modal()
    loss_function = nn.CrossEntropyLoss()
else:
    model = LSTM_fixed()
    loss_function = nn.MSELoss()

optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
print(model)
# training

dataset = Guppy_Dataset(files,
                        0,
                        num_guppy_bins,
                        num_wall_rays,
                        livedata=live_data,
                        output_model=output_model)
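The snippet ends right after the dataset is built; below is a minimal sketch of the training loop that the "# training" comment points to, assuming batch_size and epochs are defined elsewhere, DataLoader is imported from torch.utils.data, and the model's forward pass manages its own hidden state (shown for the fixed/MSE branch).

dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=True, shuffle=True)

for epoch in range(epochs):
    epoch_loss = 0.0
    for inputs, targets in dataloader:
        optimizer.zero_grad()
        outputs = model(inputs)                  # forward pass through the LSTM
        loss = loss_function(outputs, targets)   # MSE between predicted and recorded speed/turn
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print("epoch", epoch, "mean loss", epoch_loss / len(dataloader))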
Example #3
files = [
    join(trainpath, f) for f in listdir(trainpath)
    if isfile(join(trainpath, f)) and f.endswith(".hdf5")
]
files.sort()
num_files = len(files) // 2
files = files[-40:]
print(files)

torch.set_default_dtype(torch.float64)

# for the fixed output model we use regression: just predict the absolute values of linear speed
# and angular turn and train with squared-error (MSE) loss; the multi-modal model is trained
# with cross-entropy instead

if output_model == "multi_modal":
    model = LSTM_multi_modal()
    loss_function = nn.CrossEntropyLoss()
else:
    model = LSTM_fixed()
    loss_function = nn.MSELoss()

optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
print(model)
# training

dataset = Guppy_Dataset(files,
                        0,
                        num_guppy_bins,
                        num_wall_rays,
                        livedata=live_data,
                        output_model=output_model)
Example #4
valset = Guppy_Dataset(valfiles,  # reconstructed head of a truncated call; "valfiles" is a placeholder name
                       0,
                       num_guppy_bins,
                       num_wall_rays,
                       livedata=live_data,
                       output_model=output_model,
                       max_agents=1)
valloader = DataLoader(valset,
                       batch_size=valbatch_size,
                       drop_last=True,
                       shuffle=True)
print(valset.filepaths)

# each model brings its own loss function (model.lossfn): squared error for the fixed
# regression models, cross-entropy for the multi-modal ones

models = {
    "fixed": LSTM_fixed(arch=""),
    "fixedey": LSTM_fixed(arch="ey"),
    "multi_modal": LSTM_multi_modal(arch=""),
    "multi_modaley": LSTM_multi_modal(arch="ey")
}

model = models[hyperparams["overall_model"]]
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_function = model.lossfn
print(model)

# training
epochs = 15
train_losses_per_epoch = []
val_losses_per_epoch = []
for i in range(epochs):
    try:
        #training
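This example breaks off inside the try block; the following is a minimal sketch of how the per-epoch bookkeeping set up above (train_losses_per_epoch, val_losses_per_epoch, valloader) is typically completed, assuming a training DataLoader named trainloader (not shown in the snippet), that the model's forward pass manages its own hidden state, and that the try/except exists so training can be interrupted early.

for epoch in range(epochs):
    try:
        # training pass
        model.train()
        train_losses = []
        for inputs, targets in trainloader:      # trainloader is assumed, not part of the snippet
            optimizer.zero_grad()
            loss = loss_function(model(inputs), targets)
            loss.backward()
            optimizer.step()
            train_losses.append(loss.item())
        train_losses_per_epoch.append(sum(train_losses) / len(train_losses))

        # validation pass (no gradients, model in eval mode)
        model.eval()
        val_losses = []
        with torch.no_grad():
            for inputs, targets in valloader:
                val_losses.append(loss_function(model(inputs), targets).item())
        val_losses_per_epoch.append(sum(val_losses) / len(val_losses))
    except KeyboardInterrupt:
        # stop early without losing the losses recorded so far
        break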
Example #5
# get the files for 4, 6 and 8 guppys
trainpath = "guppy_data/live_female_female/train/" if live_data else "guppy_data/couzin_torus/train/"
files = [join(trainpath, f) for f in listdir(trainpath) if isfile(join(trainpath, f)) and f.endswith(".hdf5") ]
files.sort()
num_files = len(files) // 8
files = files[-3:]
print(files)

torch.set_default_dtype(torch.float64)

# for the fixed output model we use regression: just predict the absolute values of linear speed
# and angular turn and train with squared-error (MSE) loss; the multi-modal model is trained
# with cross-entropy instead

if output_model == "multi_modal":
    model = LSTM_multi_modal()
    loss_function = nn.CrossEntropyLoss()
else:
    model = LSTM_fixed()
    loss_function = nn.MSELoss()

optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
print(model)
# training

dataset = Guppy_Dataset(files, 0, num_guppy_bins, num_wall_rays, livedata=live_data, output_model=output_model)
dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=True, shuffle=True)

epochs = 12
for i in range(epochs):
    try:
Example #6
files = [
    join(trainpath, f) for f in listdir(trainpath)
    if isfile(join(trainpath, f)) and f.endswith(".hdf5")
]
files.sort()
num_files = len(files)
files = files[:num_files]
print(files)

torch.set_default_dtype(torch.float64)

# for the fixed output model we use regression: just predict the absolute values of linear speed
# and angular turn and train with squared-error (MSE) loss; the multi-modal model is trained
# with cross-entropy instead

if output_model == "multi_modal":
    model = LSTM_multi_modal()
    loss_function = nn.CrossEntropyLoss()
else:
    model = LSTM_fixed()
    loss_function = nn.MSELoss()

optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)
print(model)
# training

dataset = Guppy_Dataset(files,
                        0,
                        num_guppy_bins,
                        num_wall_rays,
                        livedata=live_data,
                        output_model=output_model)
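Example #1 restores the trained weights with torch.load followed by load_state_dict; the usual counterpart after a training run like this one is to save the model's state dict (the file name below is illustrative).

# persist the trained weights so network_simulation() can reload them later
torch.save(model.state_dict(), "lstm_guppy.pt")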