def test_data_augmentation_collapse():
    """Collapsing two consecutive dataset elements must stitch their events,
    timestamps and boundary images into one coherent sample."""
    dataset = DatasetImpl(path=data_path,
                          shape=[256, 256],
                          augmentation=True,
                          collapse_length=2,
                          is_raw=True)
    exp_idx, exp_k, exp_flip, exp_angle = 1, 2, False, 0
    exp_box = np.array([0, 0, 260, 346])
    exp_seq_length = 1
    events, timestamps, images, aug_params = dataset.__getitem__(
        idx=exp_idx, k=exp_k, is_flip=exp_flip, angle=exp_angle,
        box=exp_box, seq_length=exp_seq_length)
    # The dataset must echo back exactly the augmentation parameters we forced.
    assert aug_params[0] == exp_idx
    assert aug_params[1] == exp_seq_length
    assert aug_params[2] == exp_k
    assert (aug_params[3] == exp_box).all()
    assert aug_params[4] == exp_angle
    assert aug_params[5] == exp_flip
    first = tuple(read_test_elem(1, element_index=0, box=exp_box))
    second = tuple(read_test_elem(2, element_index=0, box=exp_box))
    gt_events = concat_events(first[0], second[0])
    gt_timestamps = np.array([0, second[2] - first[1]])
    # Event timestamps are rebased so the collapsed sample starts at zero.
    gt_events['timestamp'] -= first[1]
    # Sanity: the two raw elements are contiguous in time and share the
    # boundary frame.
    assert first[2] == second[1]
    assert (first[4] == second[3]).all()
    gt_images = np.concatenate(
        [first[3][None], second[4][None]], axis=0).astype(np.float32)
    compare(events, gt_events)
    assert (timestamps == gt_timestamps).all()
    assert (images == gt_images).all()
def test_dataloader():
    """Batching two samples through DataLoader + collate_wrapper must produce
    correctly tagged events, timestamps, sample indices and stacked images."""
    dataset = DatasetImpl(path=data_path,
                          shape=[260, 346],
                          augmentation=False,
                          collapse_length=1,
                          is_raw=True)
    loader = torch.utils.data.DataLoader(dataset,
                                         collate_fn=collate_wrapper,
                                         batch_size=2,
                                         pin_memory=True,
                                         shuffle=False)
    batch = next(iter(loader))
    first = tuple(read_test_elem(0, element_index=0, is_torch=True))
    second = tuple(read_test_elem(1, element_index=0, is_torch=True))
    # Each sample's event timestamps are rebased to start at zero.
    first[0]['timestamp'] -= first[1]
    second[0]['timestamp'] -= second[1]
    gt_events = concat_events(first[0], second[0])
    # Events from the first sample get index 0, from the second index 1.
    gt_events['sample_index'] = np.hstack(
        [np.full_like(first[0]['x'], 0), np.full_like(second[0]['x'], 1)])
    gt_events = {key: torch.tensor(val) for key, val in gt_events.items()}
    gt_timestamps = torch.tensor(
        [0, first[2] - first[1], 0, second[2] - second[1]],
        dtype=torch.float32)
    gt_sample_idx = torch.tensor([0, 0, 1, 1], dtype=torch.long)
    # Stack begin/end frames of both samples along the batch dimension.
    frames = [first[3], first[4], second[3], second[4]]
    gt_images = torch.cat(
        [torch.tensor(f, dtype=torch.float32)[None, None] for f in frames],
        dim=0).to(torch.float32)
    compare(batch['events'], gt_events)
    assert torch.equal(batch['timestamps'], gt_timestamps)
    assert torch.equal(batch['sample_idx'], gt_sample_idx)
    assert (batch['images'] == gt_images).all()
    assert batch['size'] == 2
def test_data_augmentation_crop():
    """Cropping must slice the images to the box and remap event coordinates
    into the cropped frame consistently with the image crop."""
    dataset = DatasetImpl(path=data_path,
                          shape=[256, 256],
                          augmentation=True,
                          collapse_length=2,
                          is_raw=True)
    exp_idx, exp_k, exp_flip, exp_angle = 1, 1, False, 0
    exp_box = np.array([1, 2, 100, 150])  # (y0, x0, height, width)
    exp_seq_length = 1
    events, timestamps, images, aug_params = dataset.__getitem__(
        idx=exp_idx, k=exp_k, is_flip=exp_flip, angle=exp_angle,
        box=exp_box, seq_length=exp_seq_length)
    # The dataset must echo back exactly the augmentation parameters we forced.
    assert aug_params[0] == exp_idx
    assert aug_params[1] == exp_seq_length
    assert aug_params[2] == exp_k
    assert (aug_params[3] == exp_box).all()
    assert aug_params[4] == exp_angle
    assert aug_params[5] == exp_flip
    # Cropped outputs must fit inside the requested box.
    assert images.shape[-2:] == tuple(exp_box[-2:])
    assert (events['x'] >= 0).all()
    assert (events['y'] >= 0).all()
    assert (events['x'] < exp_box[-1]).all()
    assert (events['y'] < exp_box[-2]).all()
    raw_events, _, _, raw_image1, raw_image2 = read_test_elem(
        exp_idx, element_index=0)
    raw_images = np.concatenate(
        [raw_image1[None], raw_image2[None]], axis=0)
    stop = [exp_box[0] + exp_box[2], exp_box[1] + exp_box[3]]
    # The returned images are exactly the box-slice of the raw frames.
    assert (raw_images[:, exp_box[0]:stop[0], exp_box[1]:stop[1]]
            == images).all()
    # Keep only raw events that fall inside the crop box.
    inside = np.logical_and(
        np.logical_and(raw_events['x'] >= exp_box[1],
                       raw_events['x'] < stop[1]),
        np.logical_and(raw_events['y'] >= exp_box[0],
                       raw_events['y'] < stop[0]))
    # (x, y) -> linear index
    cropped_lin = np.ravel_multi_index(
        np.vstack([events['y'][None], events['x'][None]]),
        images.shape[-2:])
    original_lin = np.ravel_multi_index(
        np.vstack([raw_events['y'][inside][None],
                   raw_events['x'][inside][None]]),
        raw_images.shape[-2:])
    # Pixels addressed by the remapped events must match the raw pixels
    # addressed by the surviving raw events, frame by frame.
    for frame_idx in range(images.shape[0]):
        assert (images[frame_idx].ravel()[cropped_lin]
                == raw_images[frame_idx].ravel()[original_lin]).all()
def test_pred_flow():
    """Loss values computed for a precomputed predicted flow must match the
    stored reference numbers."""
    x0, y0 = 0, 0
    H, W = 246, 340
    dtype = torch.float32
    events, start, stop, image1, image2, flow = read_test_elem(
        1, box=[y0, x0, H, W], is_torch=True, read_pred=True)
    images = torch.cat(
        [image1[None, None], image2[None, None]], axis=0).to(torch.float32)
    timestamps = torch.tensor([0, stop - start], dtype=dtype)
    sample_idx = torch.LongTensor([0, 0])
    flow_sample_idx = torch.LongTensor([0])
    # (H, W, 2) -> (1, 2, H, W) layout expected by the evaluator.
    flow = flow.permute(2, 0, 1)[None]
    evaluator = Losses([(H, W)], 1, 'cpu')
    loss = evaluator([flow], timestamps.view(1, 2), flow_sample_idx,
                     images, timestamps, sample_idx)
    assert len(loss) == 3
    # Reference values for each of the three loss terms.
    references = [0.002120, 0.652659, 0.007802]
    for i, (l, gt) in enumerate(zip(loss, references)):
        assert len(l) == 1
        assert (l[0] - gt).abs() < 5e-6, f'[{i}] {l} vs {gt}'
def test_zero_flow():
    """Loss values computed for an all-zero flow must match the stored
    reference numbers (smoothness term is exactly zero)."""
    x0, y0 = 0, 0
    B, H, W = 1, 246, 340
    dtype = torch.float32
    events, start, stop, image1, image2 = read_test_elem(
        1, box=[y0, x0, H, W], is_torch=True)
    images = torch.cat(
        [image1[None, None], image2[None, None]], axis=0).to(torch.float32)
    timestamps = torch.tensor([0, stop - start], dtype=dtype)
    sample_idx = torch.LongTensor([0, 0])
    flow_sample_idx = torch.LongTensor([0])
    # A flow of zeros everywhere, shaped (B, 2, H, W).
    flow = torch.zeros((B, 2, H, W), dtype=dtype)
    evaluator = Losses([(H, W)], 1, 'cpu')
    loss = evaluator([flow], timestamps.view(1, 2), flow_sample_idx,
                     images, timestamps, sample_idx)
    assert len(loss) == 3
    # Reference values for each of the three loss terms.
    references = [0.002, 0.622660, 0]
    for i, (l, gt) in enumerate(zip(loss, references)):
        assert len(l) == 1
        assert (l[0] - gt).abs() < 5e-6, f'[{i}] {l} vs {gt}'