Example #1
import torch

from torch_geometric.data import Data
from torch_geometric.transforms import NormalizeScale


def test_normalize_scale():
    assert NormalizeScale().__repr__() == 'NormalizeScale()'

    pos = torch.randn((10, 3))

    data = Data(pos=pos)
    data = NormalizeScale()(data)
    assert len(data) == 1
    assert data.pos.min().item() > -1
    assert data.pos.max().item() < 1
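
# For reference, a minimal sketch of the tensor math behind NormalizeScale(),
# based on its documented behavior: center the node positions, then rescale
# them so every coordinate lies strictly inside (-1, 1).
import torch

pos = torch.randn((10, 3))
pos = pos - pos.mean(dim=0, keepdim=True)        # centering step
pos = pos * (1.0 / pos.abs().max()) * 0.999999   # scale just inside (-1, 1)
assert pos.abs().max().item() < 1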
Example #2
    def get_data(self):
        from torch_geometric.datasets.modelnet import ModelNet
        from torch_geometric.transforms import SamplePoints, Compose, NormalizeScale, RandomRotate, RandomTranslate
        from torch_geometric.data import DataLoader

        trans = Compose((SamplePoints(self.nr_points), NormalizeScale(),
                         RandomTranslate(0.01), RandomRotate(180)))

        #dataset = ModelNet('/media/j-pc-ub/ExtraLinux', name='40', train=True, transform=trans)
        dataset = ModelNet('data/mn40', name='40', train=True, transform=trans)
        nr_classes = dataset.num_classes  # number of label classes (40), not len(dataset)
        self.nr_classes = nr_classes

        dataset = dataset.shuffle()
        train_loader = DataLoader(dataset,
                                  batch_size=self.batch_size,
                                  drop_last=True)

        dataset_val = ModelNet('data/mn40',
                               name='40',
                               train=False,
                               transform=trans)
        val_loader = DataLoader(dataset_val,
                                batch_size=self.batch_size,
                                drop_last=True)

        return train_loader, val_loader
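
# A note on ordering in the Compose above: NormalizeScale() runs before
# RandomTranslate(0.01), so the per-node jitter is applied in normalized
# coordinates and stays within roughly 1% of the cloud's extent per axis.
# A hedged sketch of inspecting one batch from the loader built above:
# batch = next(iter(train_loader))
# batch.pos.shape  # [batch_size * nr_points, 3]
# batch.y.shape    # [batch_size]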
Example #3
    def __init__(self,
                 seq,
                 node_name,
                 cloud_topic_name,
                 tf_topic_name,
                 dataset,
                 global_tf_name="map",
                 child_tf_name="car"):
        rospy.init_node(node_name)
        # NOTE: `queue_size`, `sleep_rate`, `args` and `LOAD_GRAPH` are
        # module-level globals defined elsewhere in the original script.
        self.cloud_pub = rospy.Publisher(cloud_topic_name,
                                         PointCloud2,
                                         queue_size=queue_size)
        self.transform_broadcaster = tf2_ros.TransformBroadcaster()
        self.est_tf_pub = rospy.Publisher(
            tf_topic_name, TransformStamped,
            queue_size=queue_size)  # for visualization
        self.gt_tf_pub = rospy.Publisher(
            "gt_pose", TransformStamped,
            queue_size=queue_size)  # for visualization
        self.cap_pub = rospy.Publisher("CAP",
                                       CloudAndPose,
                                       queue_size=queue_size)
        self.rate = rospy.Rate(sleep_rate)
        self.header = Header(frame_id=global_tf_name)
        self.child_tf_name = child_tf_name  # base name before appending prefix
        self.dataset = dataset
        self.seq = seq

        transform_dict = OrderedDict()
        transform_dict[GridSampling([args.grid_size] * 3)] = ["train", "test"]
        transform_dict[NormalizeScale()] = ["train", "test"]
        transform = ComposeAdapt(transform_dict)
        self.model = Net(graph_input=LOAD_GRAPH,
                         act="LeakyReLU",
                         transform=transform,
                         dof=7)
        if args.model_path is not None and osp.exists(args.model_path):
            self.model.load_state_dict(
                torch.load(args.model_path, map_location=torch.device("cpu")))
            print("loaded weights from", args.model_path)
        self.model.eval()

        self.absolute_gt_pose = np.eye(4)[:3, :]
        self.absolute_est_pose = np.eye(4)[:3, :]
        self.infer_time_meter = AverageMeter()
        self.tr_error_meter = AverageMeter()
        self.rot_error_meter = AverageMeter()

        self.fields = [
            PointField('x', 0, PointField.FLOAT32, 1),
            PointField('y', 4, PointField.FLOAT32, 1),
            PointField('z', 8, PointField.FLOAT32, 1),
            PointField('intensity', 12, PointField.FLOAT32, 1)
        ]
        self.pose_list = []
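
        # Hypothetical publishing sketch using self.fields above: each point is
        # four float32 values (x, y, z, intensity) at byte offsets 0/4/8/12.
        # `points` is assumed to be an iterable of (x, y, z, intensity) tuples;
        # pc2.create_cloud is from sensor_msgs.point_cloud2.
        # cloud_msg = pc2.create_cloud(self.header, self.fields, points)
        # self.cloud_pub.publish(cloud_msg)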
Example #4
class CIFAR10SuperpixelsDataModule(LightningDataModule):
    def __init__(
        self,
        data_dir: str = "data/",
        train_val_test_split: Sequence[int] = (45_000, 5_000, 10_000),
        n_segments: int = 100,
        sp_generation_workers: int = 4,
        batch_size: int = 32,
        num_workers: int = 0,
        pin_memory: bool = False,
        **kwargs,
    ):
        """DataModule which converts CIFAR10 to dataset of superpixel graphs.
        Conversion happens on first run only.
        When changing pre_transforms you need to manually delete previously generated dataset files!

        Args:
            data_dir (str):                         Path to data folder.
            train_val_test_split (Sequence[int]):   Number of datapoints for training, validation and testing. Should sum up to 70_000.
            n_segments (int):                       Number of superpixels per image.
            sp_generation_workers (int):            Number of processes for superpixel dataset generation.
            batch_size (int):                       Batch size.
            num_workers (int):                      Number of processes for data loading.
            pin_memory (bool):                      Whether to pin CUDA memory (slight speed up for GPU users).
            **kwargs :                              Extra paramters passed to SLIC algorithm, learn more here:
                                                    https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.slic
        """
        super().__init__()

        self.data_dir = data_dir
        self.train_val_test_split = train_val_test_split

        # superpixel graph parameters
        self.n_segments = n_segments
        self.sp_generation_workers = sp_generation_workers

        # dataloader parameters
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory

        self.slic_kwargs = kwargs

        self.pre_transform = T.Compose(
            [
                NormalizeScale(),
            ]
        )
        self.transform = None
        self.pre_filter = None

        self.data_train: Optional[Dataset] = None
        self.data_val: Optional[Dataset] = None
        self.data_test: Optional[Dataset] = None
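
# Hypothetical usage sketch for the DataModule above. The Trainer wiring is an
# assumption, and `compactness` is just one example of a **kwargs entry that
# gets forwarded to skimage's SLIC:
# datamodule = CIFAR10SuperpixelsDataModule(
#     data_dir="data/",
#     n_segments=100,
#     batch_size=32,
#     compactness=10.0,
# )
# trainer.fit(model, datamodule=datamodule)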
Example #5
    def get_data(self):
        from torch_geometric.datasets.geometry import GeometricShapes
        from torch_geometric.transforms import SamplePoints, Compose, NormalizeScale, RandomRotate, RandomTranslate
        from torch_geometric.data import DataLoader

        trans = Compose((SamplePoints(self.nr_points), NormalizeScale(),
                         RandomTranslate(0.01), RandomRotate(180)))

        dataset = GeometricShapes('data/geometric',
                                  train=True,
                                  transform=trans)
        nr_classes = dataset.num_classes  # equals len(dataset) here only because GeometricShapes has one training sample per class
        self.nr_classes = nr_classes

        dataset = dataset.shuffle()

        # NOTE: the train and validation loaders share the same dataset in this example.
        val_loader = DataLoader(dataset,
                                batch_size=self.batch_size,
                                drop_last=True)
        train_loader = DataLoader(dataset,
                                  batch_size=self.batch_size,
                                  drop_last=True)

        return train_loader, val_loader
Example #6
BATCH_SIZE = args.batch_size
EPOCH = args.epoch
LR = args.lr
LR_DECAY = args.lr_decay
GRID_SAMPLE_SIZE = [args.grid_size] * 3
LOAD_GRAPH = False
DOF = args.dof
WEIGHT_DECAY = args.weight_decay
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # benchmark mode picks non-deterministic algorithms, so it is disabled for reproducibility
Kitti = KittiGraph if LOAD_GRAPH else KittiStandard
L2_LAMBDA = args.reg_lambda

transform_dict = OrderedDict()
transform_dict[GridSampling(GRID_SAMPLE_SIZE)] = ["train", "test"]
transform_dict[NormalizeScale()] = ["train", "test"]
transform_dict[RandomTranslate(args.random_trans)] = ["train", "test"]
transform = ComposeAdapt(transform_dict)
#transform = GridSampling(GRID_SAMPLE_SIZE)


def adjust_lr(optimizer, epoch):
    lr = LR * (0.1**(epoch / LR_DECAY))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
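
# The schedule above is a smooth exponential decay,
# lr = LR * 0.1 ** (epoch / LR_DECAY), so the learning rate drops by a factor
# of 10 every LR_DECAY epochs. With LR = 1e-3 and LR_DECAY = 30 (assumed
# values), that gives: epoch 0 -> 1.0e-3, epoch 30 -> 1.0e-4, epoch 60 -> 1.0e-5.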


def train(model, epoch, train_loader, optimizer, criterion_x, criterion_rot):
    model.train()
    temp_loss = AverageMeter()
    temp_mse_loss = AverageMeter()