Example #1
def make_config(tester_type, alpha, gamma, epsilon):
    """Build the per-run log directory and a headless Config for a Q-learning ego run."""
    log_dir = f"logs/tester={tester_type}/alpha={alpha}/gamma={gamma}/epsilon={epsilon}"
    return log_dir, Config(
        verbosity=Verbosity.SILENT,
        episode_log=f"{log_dir}/episode.log",
        run_log=f"{log_dir}/run.log",
        seed=0,
        episodes=10,
        max_timesteps=1000,
        terminate_collisions=CollisionType.EGO,
        terminate_ego_zones=True,
        terminate_ego_offroad=False,
        reward_win=6000.0,
        reward_draw=2000.0,
        cost_step=4.0,
        scenario_config=PedestriansConfig(num_pedestrians=1,
                                          outbound_pavement=1.0,
                                          inbound_pavement=1.0),
        ego_config=QLearningConfig(alpha=alpha,
                                   gamma=gamma,
                                   epsilon=epsilon,
                                   features=FeatureConfig(
                                       distance_x=False,
                                       distance_y=False,
                                       distance=True,
                                       relative_angle=True,
                                       heading=True,
                                       on_road=False,
                                       inverse_distance=False),
                                   log=f"{log_dir}/ego-qlearning.log"),
        tester_config=make_tester_config(tester_type),
        mode_config=HeadlessConfig())
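A minimal sketch of how make_config could drive a hyperparameter sweep, assuming AgentType is the Enum matched in Example #2 and that some run(config) entry point exists elsewhere in the project; the grid values, the chosen tester types, and run() itself are assumptions, not taken from the source.

from itertools import product

alphas = [0.1, 0.18, 0.3]          # assumed grid values, not from the source
gammas = [0.87, 0.95]
epsilons = [0.0005, 0.01]
tester_types = [AgentType.RANDOM, AgentType.PROXIMITY, AgentType.ELECTION]  # assumed subset

for tester_type in tester_types:
    for alpha, gamma, epsilon in product(alphas, gammas, epsilons):
        log_dir, config = make_config(tester_type, alpha, gamma, epsilon)
        run(config)  # hypothetical entry point; the real runner is not shown here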
Example #2
def make_tester_config(agent_type):
    """Return the tester configuration matching the given AgentType."""
    if agent_type is AgentType.RANDOM:
        return RandomConfig(epsilon=0.01)
    elif agent_type is AgentType.RANDOM_CONSTRAINED:
        return RandomConstrainedConfig(epsilon=0.5)
    elif agent_type is AgentType.PROXIMITY:
        return ProximityConfig(threshold=float(M2PX * 34))
    elif agent_type is AgentType.ELECTION:
        return ElectionConfig(threshold=float(M2PX * 34))
    elif agent_type is AgentType.Q_LEARNING:
        return QLearningConfig(alpha=0.18,
                               gamma=0.87,
                               epsilon=0.0005,
                               features=FeatureConfig(distance_x=True,
                                                      distance_y=True,
                                                      distance=True,
                                                      on_road=False,
                                                      facing=False,
                                                      inverse_distance=True),
                               log=None)
    else:
        raise NotImplementedError
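A small usage check, assuming AgentType is an Enum whose members include those matched above; it instantiates one tester config per agent type and reports any unsupported types.

for agent_type in AgentType:
    try:
        tester = make_tester_config(agent_type)
        print(agent_type, "->", type(tester).__name__)
    except NotImplementedError:
        print(agent_type, "-> no tester config defined")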
Example #3
            torch.stack([
                torch.from_numpy(gt_positive_choice_ids)[pos_gt],
                torch.from_numpy(pcd_positive_choice_ids)[pos_pcd]
            ],
                        dim=1),
            "neg_indices":
            torch.stack((torch.cat(
                [neg_gt,
                 torch.from_numpy(gt_negative_choice_ids)[neg2_gt]],
                0), torch.cat([neg_pcd, neg2_pcd], 0)),
                        dim=1)
        }


if __name__ == "__main__":
    import open3d
    dataset = SileaneDataset(FeatureConfig())
    model = homogenize_points(load_model("./data/sileane/gear/mesh.ply"))
    for idx in range(10):
        pcd, model_gt, feats_, feats_gt, pos, neg, gt_ = dataset[idx]
        print(model_gt.shape)
        p = open3d.geometry.PointCloud()
        p.points = open3d.utility.Vector3dVector(pcd.numpy()[:, :3])
        pcds = [p]
        for g in gt_:
            k = open3d.geometry.PointCloud()
            k.points = open3d.utility.Vector3dVector(model_gt)
            k.paint_uniform_color([0, 1, 0])
            pcds.append(k)
        open3d.visualization.draw_geometries(pcds)
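A hedged variant of the visualization loop above, assuming each element of gt_ is a 4x4 homogeneous pose for one ground-truth instance and model_gt holds the untransformed model points; applying the pose before drawing is an assumption, since the original loop leaves g unused.

import numpy as np

for g in gt_:
    k = open3d.geometry.PointCloud()
    k.points = open3d.utility.Vector3dVector(model_gt)
    k.transform(np.asarray(g, dtype=np.float64))  # assumed: g is a 4x4 pose matrix
    k.paint_uniform_color([0, 1, 0])
    pcds.append(k)
open3d.visualization.draw_geometries(pcds)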
Example #4
    def __init__(self, config):
        super(PositiveContrastiveLoss, self).__init__()
        self._config = config

    def forward(self, features_gt, features_object, positive_indices):
        """Mean feature distance over the matched (gt, object) positive pairs."""
        return distance(
            feats1=features_object[positive_indices[:, 1].long(), :],
            feats2=features_gt[positive_indices[:, 0].long(), :]).mean()


class NegativeContrastiveLoss(torch.nn.Module):
    def __init__(self, config: FeatureConfig):
        super(NegativeContrastiveLoss, self).__init__()
        self._config = config

    def forward(self, features_model, features_pointcloud, negative_indices):
        """neg_coef minus the mean ReLU-clipped feature distance over negative pairs."""
        return self._config.neg_coef - torch.relu(
            distance(
                feats1=features_pointcloud[negative_indices[:, 1].long(), :],
                feats2=features_model[negative_indices[:, 0].long(), :])).mean()


if __name__ == "__main__":
    neg = PositiveContrastiveLoss(FeatureConfig())
    print(
        find_model_opposite_points(
            torch.randint(low=0, high=100, size=(100, 3)).float(),
            torch.randint(low=0, high=100, size=(100, 3)).float(),
            100)[0].shape)
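A self-contained smoke test of the two losses on random data, assuming FeatureConfig (from config, as imported in Examples #5 and #6) and the module-level distance() used above are in scope; the feature dimension, pair counts, and equal loss weighting are arbitrary assumptions.

import torch

feats_gt = torch.randn(100, 32, requires_grad=True)    # assumed feature dimension
feats_pcd = torch.randn(100, 32, requires_grad=True)
pos_indices = torch.randint(0, 100, (64, 2))
neg_indices = torch.randint(0, 100, (64, 2))

pos_loss = PositiveContrastiveLoss(FeatureConfig())(feats_gt, feats_pcd, pos_indices)
neg_loss = NegativeContrastiveLoss(FeatureConfig())(feats_gt, feats_pcd, neg_indices)
(pos_loss + neg_loss).backward()                        # equal weighting is an assumption
print(pos_loss.item(), neg_loss.item())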
Example #5
def get_data_loaders(config: FeatureConfig):
    """Build eval (unshuffled) and train (shuffled) DataLoaders over one SileaneDataset."""
    dataset = SileaneDataset(config)
    return {
        Mode.eval:
        DataLoader(dataset,
                   num_workers=config.num_workers,
                   batch_size=config.batch_size,
                   shuffle=False,
                   collate_fn=collate_pair_fn,
                   drop_last=True),
        Mode.train:
        DataLoader(dataset,
                   num_workers=config.num_workers,
                   batch_size=config.batch_size,
                   shuffle=True,
                   collate_fn=collate_pair_fn,
                   drop_last=True)
    }


if __name__ == "__main__":
    from config import FeatureConfig
    from time import time
    x = get_data_loaders(FeatureConfig())
    start = time()
    for data in x[Mode.eval]:
        print(data["pos_indices"].shape)
        print("ELLAPSED: {}".format(time() - start))
        start = time()
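A hedged sketch of consuming the loader dict in a combined train/eval loop; the epoch count and the per-batch steps are placeholders, not taken from the source.

loaders = get_data_loaders(FeatureConfig())
for epoch in range(5):                  # assumed epoch count
    for batch in loaders[Mode.train]:
        pass                            # hypothetical training step on batch
    for batch in loaders[Mode.eval]:
        pass                            # hypothetical evaluation step on batch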
Example #6
from config import FeatureConfig
from training.feature_trainer import FeatureTrainer

trainer = FeatureTrainer(FeatureConfig())
trainer.train(1)
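A hedged variation of the same entry point, assuming FeatureConfig exposes the batch_size and num_workers fields used in Example #5 and that the argument to train() is an epoch count; both are assumptions drawn from context, not from the source.

config = FeatureConfig()
config.batch_size = 4        # assumed override of the field used in get_data_loaders
config.num_workers = 2
trainer = FeatureTrainer(config)
trainer.train(10)            # assumed: number of training epochs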