Example #1
    def eventsArrCallback(self, data):
        # Ignore empty messages: without this guard, event_window would be undefined below.
        if data.data is None:
            return
        # Interpret the flat event array as rows of (t, x, y, pol).
        event_window = torch.Tensor(data.data).view(-1, 4).to(self.device)

        with Timer('Processing entire dataset'):

            last_timestamp = event_window[-1, 0]
            #print(event_window[0,0],event_window[-1,0])

            with Timer('Building event tensor'):
                if args.compute_voxel_grid_on_cpu:
                    # events_to_voxel_grid expects a NumPy [N x 4] array (its result is converted
                    # back with torch.from_numpy below), so move the events to the CPU first.
                    event_tensor = events_to_voxel_grid(
                        event_window.cpu().numpy(),
                        num_bins=self.num_bins,
                        width=self.width,
                        height=self.height)
                    event_tensor = torch.from_numpy(event_tensor)
                else:
                    event_tensor = events_to_voxel_grid_pytorch(event_window,
                                                                num_bins=self.num_bins,
                                                                width=self.width,
                                                                height=self.height,
                                                                device=self.device)
                    
            num_events_in_window = event_window.shape[0]
            self.reconstructor.update_reconstruction(event_tensor, self.start_index + num_events_in_window, last_timestamp)

            self.start_index += num_events_in_window
Example #2
    if args.compute_voxel_grid_on_cpu:
        print("Will compute voxel grid on CPU.")

    event_tensor_iterator = pd.read_csv(
        path_to_events,
        delim_whitespace=True,
        header=None,
        names=["t", "x", "y", "pol"],
        dtype={"t": np.float64, "x": np.int16, "y": np.int16, "pol": np.int16},
        engine="c",
        skiprows=start_index,
        chunksize=N,
        nrows=None,
    )
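    # With chunksize=N, read_csv returns an iterator (TextFileReader) that yields
    # successive DataFrames of N events each, so the whole file is never loaded at once.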

    with Timer("Processing entire dataset"):
        for event_tensor_pd in event_tensor_iterator:

            last_timestamp = event_tensor_pd.values[-1, 0]

            with Timer("Building event tensor"):
                if args.compute_voxel_grid_on_cpu:
                    event_tensor = events_to_voxel_grid(
                        event_tensor_pd.values,
                        num_bins=model.num_bins,
                        width=args.width,
                        height=args.height,
                    )
                    event_tensor = torch.from_numpy(event_tensor)
                else:
                    event_tensor = events_to_voxel_grid_pytorch(
                        event_tensor_pd.values,
                        num_bins=model.num_bins,
                        width=args.width,
                        height=args.height,
                        device=device,
                    )
Example #3
    start_index = initial_offset + sub_offset

    if args.compute_voxel_grid_on_cpu:
        print('Will compute voxel grid on CPU.')

    if args.fixed_duration:
        event_window_iterator = FixedDurationEventReader(
            path_to_events,
            duration_ms=args.window_duration,
            start_index=start_index)
    else:
        event_window_iterator = FixedSizeEventReader(path_to_events,
                                                     num_events=N,
                                                     start_index=start_index)

    with Timer('Processing entire dataset'):
        for event_window in event_window_iterator:

            last_timestamp = event_window[-1, 0]

            with Timer('Building event tensor'):
                if args.compute_voxel_grid_on_cpu:
                    event_tensor = events_to_voxel_grid(
                        event_window,
                        num_bins=model.num_bins,
                        width=width,
                        height=height)
                    event_tensor = torch.from_numpy(event_tensor)
                else:
                    event_tensor = events_to_voxel_grid_pytorch(
                        event_window,
                        num_bins=model.num_bins,
                        width=width,
                        height=height,
                        device=device)
Example #4
def run_reconstruction(**kwargs):

    parser = argparse.ArgumentParser(
        description='Evaluating a trained network')
    parser.add_argument('-c',
                        '--path_to_model',
                        type=str,
                        help='path to model weights')
    parser.add_argument('-i', '--input_file', type=str)
    parser.add_argument('--fixed_duration',
                        dest='fixed_duration',
                        action='store_true')
    parser.set_defaults(fixed_duration=False)
    parser.add_argument(
        '-N',
        '--window_size',
        default=None,
        type=int,
        help=
        "Size of each event window, in number of events. Ignored if --fixed_duration=True"
    )
    parser.add_argument(
        '-T',
        '--window_duration',
        default=33.33,
        type=float,
        help=
        "Duration of each event window, in milliseconds. Ignored if --fixed_duration=False"
    )
    parser.add_argument(
        '--num_events_per_pixel',
        default=0.35,
        type=float,
        help='in case N (window size) is not specified, it will be \
                              automatically computed as N = width * height * num_events_per_pixel'
    )
    parser.add_argument('--skipevents', default=0, type=int)
    parser.add_argument('--suboffset', default=0, type=int)
    parser.add_argument('--compute_voxel_grid_on_cpu',
                        dest='compute_voxel_grid_on_cpu',
                        action='store_true')
    parser.add_argument('--channelName', default=None, type=str)
    parser.set_defaults(compute_voxel_grid_on_cpu=False)

    set_inference_options(parser)

    args = parser.parse_args()

    # vars(args) returns the namespace's attribute dict, so updating it in place lets any
    # keyword arguments passed to run_reconstruction() override the parsed CLI values.
    argsDict = vars(args)
    argsDict.update(kwargs)

    # Load model
    model = load_model(args.path_to_model)
    device = get_device(args.use_gpu)

    model = model.to(device)
    model.eval()

    path_to_events = args.input_file

    container = importAe(filePathOrName=path_to_events, **kwargs)
    channelName = args.channelName
    if channelName is not None:
        dvs = container['data'][channelName]['dvs']
    else:  # use Container functionality to find the right channel
        containerObj = Container(container)
        dvs = containerObj.getDataType('dvs')
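    # Sensor size: use the recorded dimensions if present, otherwise infer them from the largest event coordinates.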
    width = dvs.get('dimX', np.max(dvs['x']) + 1)
    height = dvs.get('dimY', np.max(dvs['y']) + 1)

    reconstructor = ImageReconstructor(model, height, width, model.num_bins,
                                       args)
    """ Read chunks of events using Pandas """

    # Loop through the events and reconstruct images
    N = args.window_size
    if not args.fixed_duration:
        if N is None:
            N = int(width * height * args.num_events_per_pixel)
            print(
                'Will use {} events per tensor (automatically estimated with num_events_per_pixel={:0.2f}).'
                .format(N, args.num_events_per_pixel))
        else:
            print('Will use {} events per tensor (user-specified)'.format(N))
            mean_num_events_per_pixel = float(N) / float(width * height)
            if mean_num_events_per_pixel < 0.1:
                print(
                    '!!Warning!! the number of events used ({}) seems to be low compared to the sensor size. \
                    The reconstruction results might be suboptimal.'.format(N))
            elif mean_num_events_per_pixel > 1.5:
                print(
                    '!!Warning!! the number of events used ({}) seems to be high compared to the sensor size. \
                    The reconstruction results might be suboptimal.'.format(N))

    initial_offset = args.skipevents
    sub_offset = args.suboffset
    start_index = initial_offset + sub_offset

    if args.compute_voxel_grid_on_cpu:
        print('Will compute voxel grid on CPU.')

    if args.fixed_duration:
        event_window_iterator = FixedDurationEventReader(
            dvs, duration_ms=args.window_duration, start_index=start_index)
    else:
        event_window_iterator = FixedSizeEventReader(dvs,
                                                     num_events=N,
                                                     start_index=start_index)

    print('Sensor size: {} x {}'.format(width, height))

    with Timer('Processing entire dataset'):
        for event_window in event_window_iterator:

            last_timestamp = event_window[-1, 0]

            with Timer('Building event tensor'):
                if args.compute_voxel_grid_on_cpu:
                    event_tensor = events_to_voxel_grid(
                        event_window,
                        num_bins=model.num_bins,
                        width=width,
                        height=height)
                    event_tensor = torch.from_numpy(event_tensor)
                else:
                    event_tensor = events_to_voxel_grid_pytorch(
                        event_window,
                        num_bins=model.num_bins,
                        width=width,
                        height=height,
                        device=device)

            num_events_in_window = event_window.shape[0]
            reconstructor.update_reconstruction(
                event_tensor, start_index + num_events_in_window,
                last_timestamp)

            start_index += num_events_in_window
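
Since run_reconstruction() merges any keyword arguments into the parsed argparse namespace (argsDict.update(kwargs)) and also forwards them to importAe(), it can be driven from another script as well as from the command line. The snippet below is a minimal sketch of such a call, assuming importAe tolerates the extra keyword arguments it receives; the file paths are hypothetical placeholders and the keyword names simply mirror the CLI options defined above.

# Minimal sketch: calling run_reconstruction() programmatically.
# The paths are hypothetical placeholders; the keyword names mirror the CLI options
# above and override the parsed defaults via argsDict.update(kwargs).
run_reconstruction(
    path_to_model='pretrained/E2VID.pth.tar',   # hypothetical checkpoint path
    input_file='data/recording.aedat4',         # hypothetical event recording
    window_size=30000,                          # events per reconstruction window
    compute_voxel_grid_on_cpu=False)
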
Example #5
    def update_reconstruction(self, event_tensor, event_tensor_id, stamp=None):
        with torch.no_grad():

            with Timer('Reconstruction'):

                with Timer('NumPy (CPU) -> Tensor (GPU)'):
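                    # Add a batch dimension ([C, H, W] -> [1, C, H, W]) and move the voxel grid to the target device.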
                    events = event_tensor.unsqueeze(dim=0)
                    events = events.to(self.device)

                events = self.event_preprocessor(events)

                # Resize tensor to [1 x C x crop_size x crop_size] by applying zero padding
                events_for_each_channel = {'grayscale': self.crop.pad(events)}
                reconstructions_for_each_channel = {}
                if self.perform_color_reconstruction:
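                    # Subsample the event tensor at the four positions of a 2x2 pixel mosaic,
                    # giving half-resolution event tensors for the R, G, W and B channels.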
                    events_for_each_channel['R'] = self.crop_halfres.pad(
                        events[:, :, 0::2, 0::2])
                    events_for_each_channel['G'] = self.crop_halfres.pad(
                        events[:, :, 0::2, 1::2])
                    events_for_each_channel['W'] = self.crop_halfres.pad(
                        events[:, :, 1::2, 0::2])
                    events_for_each_channel['B'] = self.crop_halfres.pad(
                        events[:, :, 1::2, 1::2])

                # Reconstruct new intensity image for each channel (grayscale + RGBW if color reconstruction is enabled)
                for channel in events_for_each_channel.keys():
                    with Timer('Inference'):
                        new_predicted_frame, states = self.model(
                            events_for_each_channel[channel],
                            self.last_states_for_each_channel[channel])

                    if self.no_recurrent:
                        self.last_states_for_each_channel[channel] = None
                    else:
                        self.last_states_for_each_channel[channel] = states

                    # Output reconstructed image
                    crop = self.crop if channel == 'grayscale' else self.crop_halfres

                    # Unsharp mask (on GPU)
                    new_predicted_frame = self.unsharp_mask_filter(
                        new_predicted_frame)

                    # Intensity rescaler (on GPU)
                    new_predicted_frame = self.intensity_rescaler(
                        new_predicted_frame)

                    with Timer('Tensor (GPU) -> NumPy (CPU)'):
                        reconstructions_for_each_channel[
                            channel] = new_predicted_frame[
                                0, 0, crop.iy0:crop.iy1,
                                crop.ix0:crop.ix1].cpu().numpy()

                if self.perform_color_reconstruction:
                    out = merge_channels_into_color_image(
                        reconstructions_for_each_channel)
                else:
                    out = reconstructions_for_each_channel['grayscale']

            # Post-processing, e.g. bilateral filter (on CPU)
            out = self.image_filter(out)

            self.image_writer(out, event_tensor_id, stamp, events=events)
            self.image_display(out, events)