def update(self):
    """Advance this agent one simulation step.

    Random-walks the (x, y) position, refreshes the model's transform to the
    new position, then runs the sickness bookkeeping.
    """
    # Jitter each coordinate by a bounded random amount around its current value.
    self.x = self.ranged_random_num(0.2, self.x)
    self.y = self.ranged_random_num(0.2, self.y)
    # Rebuild the placement transform: move to (x, y) and shrink to sprite size.
    placement = tr.matmul([
        tr.translate(self.x, self.y, 0),
        tr.scale(0.02, 0.02, 0),
    ])
    self.model.transform = placement
    self.while_sick()
def main(args):
    """Evaluate a saved model on a directory dataset and print precision/recall/F1.

    Args:
        args: parsed CLI namespace with `data_dir` (dataset root) and
            `model_file` (checkpoint path whose basename selects the model class).
    """
    test_data = DirectoryDataset(args.data_dir)
    # BUGFIX: drop_last was True, which silently discarded up to batch_size-1
    # test samples and biased the reported metrics. Evaluation must see every sample.
    data_loader = DataLoader(test_data,
                             batch_size=128,
                             shuffle=False,
                             num_workers=1,
                             pin_memory=True,
                             drop_last=False)
    # The checkpoint's basename (without extension) names the model class in `models`.
    model_type_name = os.path.basename(args.model_file).split(".")[0]
    model = getattr(models, model_type_name).load(args.model_file,
                                                  device).to(device)
    model.eval()
    print(f"{num_parameters(model)} parameters")
    all_outputs = []
    all_labels = []
    # NOTE(review): no torch.no_grad() here — presumably `evaluate` disables
    # gradients internally; confirm, otherwise wrap this loop.
    for environment_inputs, inputs, real, sim, start_states in data_loader:
        sim = sim.to(device)
        # Scale inputs into the model's expected (-1, 1) range.
        inputs_scaled = transformations.scale(sim, *model.input_limits, -1, 1)
        additional_inputs = torch.cat((inputs, environment_inputs),
                                      dim=-1).to(device)
        additional_inputs_scaled = transformations.scale(
            additional_inputs, *model.additional_input_limits, -1, 1)
        # Broadcast the per-sequence additional inputs across every time step.
        augmented_input_seq = torch.cat(
            (inputs_scaled, additional_inputs_scaled.unsqueeze(1).repeat(
                1, inputs_scaled.size(1), 1)),
            dim=-1)
        # Label is the second feature of the last time step of the real trace.
        labels = real[:, -1, 1].unsqueeze(-1)
        output_batch = evaluate(model, augmented_input_seq)
        # Outputs are in range (-1, 1) --> scale to the model's output range,
        # then clamp into [0, 1] for the F1 computation.
        outputs_unscaled = transformations.scale(output_batch, -1, 1,
                                                 *model.output_limits).cpu()
        outputs_unscaled = torch.clamp(outputs_unscaled, 0, 1)
        all_outputs.append(outputs_unscaled)
        all_labels.append(labels)
    all_outputs = torch.cat(all_outputs, dim=0)
    all_labels = torch.cat(all_labels, dim=0)
    precision, recall, f1 = compute_f1(all_outputs, all_labels)
    print(
        f"Precision: {precision.item():.4f}, recall: {recall.item():.4f}, F1: {f1.item():.4f}"
    )
def draw(self, pipeline):
    """Draw this agent with `pipeline`, skipping entirely if it is dead.

    Chooses the GPU model matching the agent's current health state before
    placing it at (x, y) at sprite scale.
    """
    if self.is_dead():
        return
    # Select the mesh that reflects the current health state.
    if self.is_sick():
        self.model = self.sick_gpu
    elif self.is_healthy():
        self.model = self.healthy_gpu
    self.model.transform = tr.matmul([
        tr.translate(self.x, self.y, 0),
        tr.scale(0.02, 0.02, 0),
    ])
    sg.drawSceneGraphNode(self.model, pipeline, "transform")
def _epoch_pass(model, loader, optimizer, criterion, validate):
    """Run one full pass over `loader` and return the average per-batch loss.

    When validate=False the pass trains (train_step applies the optimizer);
    when validate=True it only computes losses. Callers are responsible for
    model.train()/model.eval() and torch.no_grad().
    """
    total_loss = 0
    for environment_inputs, inputs, real, sim, start_states in tqdm(loader):
        # Scale the simulated trace into the model's (-1, 1) input range.
        inputs_scaled = transformations.scale(sim, *model.input_limits, -1,
                                              1).to(device)
        additional_inputs = torch.cat((inputs, environment_inputs), dim=-1)
        additional_inputs_scaled = transformations.scale(
            additional_inputs, *model.additional_input_limits, -1,
            1).to(device)
        # Label is the second feature of the last time step of the real trace.
        labels = real[:, -1, 1].unsqueeze(-1).to(device)
        total_loss += train_step(inputs_scaled,
                                 additional_inputs_scaled,
                                 labels,
                                 model,
                                 optimizer,
                                 criterion,
                                 validate=validate)
    return total_loss / len(loader)


def train_loop(model, train_loader, val_loader, n_epochs, learning_rate):
    """Train `model` for `n_epochs`, validating after each epoch.

    Args:
        model: the network to train (moved to `device` here).
        train_loader / val_loader: DataLoaders yielding
            (environment_inputs, inputs, real, sim, start_states) batches.
        n_epochs: number of epochs to run.
        learning_rate: Adam learning rate (weight decay fixed at 1e-4).

    Returns:
        loss_history: list of [avg_train_loss, avg_val_loss] per epoch.
    """
    # The original duplicated the whole batch-processing loop for train and
    # validation; both passes now share _epoch_pass.
    start = time.time()
    loss_history = []
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=1e-4)
    criterion = torch.nn.BCEWithLogitsLoss()
    model = model.to(device)
    for epoch in range(1, n_epochs + 1):
        model.train()
        avg_train_loss = _epoch_pass(model,
                                     train_loader,
                                     optimizer,
                                     criterion,
                                     validate=False)
        model.eval()
        with torch.no_grad():
            avg_val_loss = _epoch_pass(model,
                                       val_loader,
                                       optimizer,
                                       criterion,
                                       validate=True)
        loss_history.append([avg_train_loss, avg_val_loss])
        print('%s (%d %d%%) %.4f' % (time_since(start, epoch / n_epochs),
                                     epoch, epoch / n_epochs * 100,
                                     avg_val_loss))
    return loss_history
def normalize_matrix(self, array: np.ndarray):
    """Project homogeneous row vectors into screen space.

    Builds the screen transform (flip-y scale followed by a translation to the
    canvas center), applies it to every row of `array`, and performs the
    perspective divide by the last (homogeneous) column.
    """
    screen = np.matmul(
        trans.translate(self.width / 2, self.height / 2),
        trans.scale(self.scale, -self.scale))
    # Rows are transformed via the transpose so each row vector is mapped by `screen`.
    projected = array @ screen.T
    # Divide every row by its homogeneous coordinate (kept 2-D via the [-1] slice).
    return projected / projected[:, [-1]]