Example #1
    def run(self):
        # build the model, load the trained weights, and switch to eval mode
        model = TopkNet(self.cfg).to(Global.DEVICE)
        Global.load_weight(model, self.W_PATH)
        model.eval()

        future_coords_offsets_pd = []
        timestamps = []
        agent_ids = []
        confs = []

        with torch.no_grad():
            dataiter = tqdm(self.test_dataloader)

            for data in dataiter:
                inputs = data["image"].to(Global.DEVICE)
                # TopkNet returns sampled hypotheses, the fitted mode means,
                # and per-mode mixture weights
                samples_means, means, mixture_weights = model(inputs)

                # (batch, num_modes, num_timesteps, 2), still in image space
                fit_outputs = torch.stack(means, dim=1)
                # squeeze only the trailing singleton dim so a batch of size
                # one keeps its batch dimension
                fit_confidences = torch.stack(mixture_weights,
                                              dim=1).squeeze(-1)
                outputs = torch.zeros(fit_outputs.size(0), fit_outputs.size(1),
                                      fit_outputs.size(2), 2).to(Global.DEVICE)

                # hard (one-hot) confidences via argmax; computed here but not
                # used below -- the soft fit_confidences are what gets written
                conf = []
                one_hot_el = torch.eye(3)
                for i in range(fit_confidences.size(0)):
                    idx = torch.argmax(fit_confidences[i]).item()
                    conf.append(one_hot_el[idx])

                # map each of the three predicted modes from image (raster)
                # space back to world coordinates
                for mode in range(3):
                    outputs[:, mode] = Utils.map_writer_from_image_to_world(
                        data, fit_outputs[:, mode, :, :], self.cfg)

                future_coords_offsets_pd.append(outputs.cpu().numpy().copy())
                timestamps.append(data["timestamp"].numpy().copy())
                agent_ids.append(data["track_id"].numpy().copy())
                confs.append(fit_confidences.cpu().numpy().copy())
                if len(confs) == 10:  # debug limit: stop after 10 batches
                    break
        write_pred_csv(Global.MULTI_MODE_SUBMISSION,
                       timestamps=np.concatenate(timestamps),
                       track_ids=np.concatenate(agent_ids),
                       coords=np.concatenate(future_coords_offsets_pd),
                       confs=np.concatenate(confs))
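
The helpers used above come from the surrounding project: write_pred_csv is l5kit's submission writer (from l5kit.evaluation), while Utils.map_writer_from_image_to_world is not shown. Below is a minimal sketch of what such a pixel-to-world mapping could look like with l5kit's transform_points, assuming the batch carries the "raster_from_agent", "world_from_agent", and "centroid" entries that l5kit's AgentDataset provides; the function name map_from_image_to_world is hypothetical.

    import numpy as np
    import torch
    from l5kit.geometry import transform_points

    def map_from_image_to_world(data, coords_image, cfg):
        # coords_image: (batch, num_timesteps, 2) positions in raster pixels
        coords = coords_image.cpu().numpy()
        out = np.zeros_like(coords)
        for i in range(coords.shape[0]):
            # pixels -> agent frame via the inverse raster transform
            agent_from_raster = np.linalg.inv(
                data["raster_from_agent"][i].numpy())
            in_agent = transform_points(coords[i], agent_from_raster)
            # agent frame -> world, minus the agent centroid so the result is
            # an offset, which is what write_pred_csv expects
            in_world = transform_points(
                in_agent, data["world_from_agent"][i].numpy())
            out[i] = in_world - data["centroid"][i].numpy()
        return torch.from_numpy(out)

The exact key names and the centroid subtraction depend on the l5kit version and on how the targets were rasterized, so treat this as a template rather than a drop-in replacement.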
Example #2
    def run(self):
        model = TopkNet(self.cfg).to(Global.DEVICE)
        Global.load_weight(model, self.W_PATH)
        # very small learning rate, consistent with fine-tuning loaded weights
        optimizer = optim.Adam(model.parameters(), lr=1e-6)
        progress_bar = tqdm(range(self.cfg["train_params"]["max_num_steps"]))
        losses_train = []
        prelosses_train = []

        straight_it = iter(self.straight_train_dataloader)

        for itr in progress_bar:
            try:
                data = next(straight_it)
            except StopIteration:  # dataloader exhausted: restart it
                straight_it = iter(self.straight_train_dataloader)
                data = next(straight_it)
            model.train()
            torch.set_grad_enabled(True)

            inputs = data["image"].to(Global.DEVICE)
            targets = Utils.map_writer_from_world_to_image(data, self.cfg).to(
                Global.DEVICE)
            target_availabilities = data["target_availabilities"].to(
                Global.DEVICE)
            samples_means, means, mixture_weights = model(inputs)

            fit_outputs = torch.stack(means, dim=1)
            # squeeze only the trailing singleton dim so a batch of size one
            # keeps its batch dimension
            fit_confidences = torch.stack(mixture_weights, dim=1).squeeze(-1)

            # warm-up: train the sampled hypotheses with an annealed top-n EPE
            # loss, shrinking n every 20 steps until only the best hypothesis
            # contributes (an EWTA-style schedule)
            if itr <= 120:
                if itr <= 20:
                    loss = Loss.sampling_loss(targets, samples_means,
                                              target_availabilities)
                elif itr <= 40:
                    loss = Loss.sampling_loss(targets, samples_means,
                                              target_availabilities,
                                              "epe-top-n", 40)
                elif itr <= 60:
                    loss = Loss.sampling_loss(targets, samples_means,
                                              target_availabilities,
                                              "epe-top-n", 20)
                elif itr <= 80:
                    loss = Loss.sampling_loss(targets, samples_means,
                                              target_availabilities,
                                              "epe-top-n", 10)
                elif itr <= 100:
                    loss = Loss.sampling_loss(targets, samples_means,
                                              target_availabilities,
                                              "epe-top-n", 5)
                else:
                    loss = Loss.sampling_loss(targets, samples_means,
                                              target_availabilities, "epe")
                # freeze the fitting head (parameters from index 68 on); the
                # flags take effect from the next iteration's forward pass
                for i, param in enumerate(model.parameters()):
                    if i >= 68:
                        param.requires_grad = False

            elif itr <= 200:
                # second phase: optimize only the fitting head (index >= 68);
                # the sampling backbone stays frozen
                loss, pre_loss = Loss.fitting_loss(targets, fit_outputs,
                                                   fit_confidences,
                                                   target_availabilities)
                for i, param in enumerate(model.parameters()):
                    param.requires_grad = i >= 68
            else:
                # final phase: fine-tune all parameters jointly
                loss, pre_loss = Loss.fitting_loss(targets, fit_outputs,
                                                   fit_confidences,
                                                   target_availabilities)
                for param in model.parameters():
                    param.requires_grad = True

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            losses_train.append(loss.item())

            if itr <= 120:
                progress_bar.set_description(
                    f"loss: {loss.item()} loss(avg): {np.mean(losses_train[-100:])}"
                )
            else:
                # pre_loss exists only once the fitting loss is active
                prelosses_train.append(pre_loss.item())
                progress_bar.set_description(
                    f"pre_loss: {pre_loss.item()} pre_loss(avg): {np.mean(prelosses_train[-100:])}"
                )

            if (itr + 1) % self.cfg['train_params'][
                    'checkpoint_every_n_steps'] == 0 and not Global.DEBUG:
                torch.save(model.state_dict(),
                           f"./output/param/topk_model_state_{itr}.pth")