)
parser.add_argument("--skipevents", default=0, type=int)
parser.add_argument("--suboffset", default=0, type=int)
parser.add_argument(
    "--compute_voxel_grid_on_cpu",
    dest="compute_voxel_grid_on_cpu",
    action="store_true",
)
parser.set_defaults(compute_voxel_grid_on_cpu=False)

set_inference_options(parser)

args = parser.parse_args()

# Loading model
model = load_model(args.path_to_model)
device = get_device(args.use_gpu)
model = model.to(device)
model.eval()

reconstructor = ImageReconstructor(
    model, args.height, args.width, model.num_bins, args
)

""" Read chunks of events using Pandas """

path_to_events = args.input_file

# Loop through the events and reconstruct images
N = args.window_size
# First line of the event file holds the sensor size
header = pd.read_csv(path_to_events, delim_whitespace=True, header=None,
                     names=['width', 'height'],
                     dtype={'width': int, 'height': int},  # np.int was removed in recent NumPy; plain int is equivalent
                     nrows=1)
width, height = header.values[0]
print('Sensor size: {} x {}'.format(width, height))

# Load model
args.use_gpu = False
device = get_device(args.use_gpu)
model = load_model(args.path_to_model, device)
model = model.to(device)
model.eval()

reconstructor = ImageReconstructor(model, height, width, model.num_bins, args)

""" Read chunks of events using Pandas """

# Loop through the events and reconstruct images
N = args.window_size
if not args.fixed_duration:
    if N is None:
        N = int(width * height * args.num_events_per_pixel)
        print('Will use {} events per tensor (automatically estimated with num_events_per_pixel={:0.2f}).'
              .format(N, args.num_events_per_pixel))
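# The FixedSizeEventReader / FixedDurationEventReader classes used further down
# are not shown in this snippet. A minimal sketch of fixed-size chunked reading
# with pandas, assuming a whitespace-separated text file whose first line is
# "width height" and whose remaining lines are "t x y p" (the helper name
# iterate_event_windows is hypothetical, not part of the original code):
import pandas as pd

def iterate_event_windows(path_to_events, num_events, start_index=0):
    chunks = pd.read_csv(path_to_events, delim_whitespace=True, header=None,
                         names=['t', 'x', 'y', 'pol'],
                         dtype={'t': float, 'x': int, 'y': int, 'pol': int},
                         engine='c',
                         skiprows=start_index + 1,  # +1 skips the header line
                         chunksize=num_events)
    for chunk in chunks:
        yield chunk.values  # (num_events, 4) array with columns [t, x, y, pol]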
def run_reconstruction(**kwargs):
    parser = argparse.ArgumentParser(description='Evaluating a trained network')
    parser.add_argument('-c', '--path_to_model', type=str, help='path to model weights')
    parser.add_argument('-i', '--input_file', type=str)
    parser.add_argument('--fixed_duration', dest='fixed_duration', action='store_true')
    parser.set_defaults(fixed_duration=False)
    parser.add_argument('-N', '--window_size', default=None, type=int,
                        help="Size of each event window, in number of events. Ignored if --fixed_duration=True")
    parser.add_argument('-T', '--window_duration', default=33.33, type=float,
                        help="Duration of each event window, in milliseconds. Ignored if --fixed_duration=False")
    parser.add_argument('--num_events_per_pixel', default=0.35, type=float,
                        help='in case N (window size) is not specified, it will be '
                             'automatically computed as N = width * height * num_events_per_pixel')
    parser.add_argument('--skipevents', default=0, type=int)
    parser.add_argument('--suboffset', default=0, type=int)
    parser.add_argument('--compute_voxel_grid_on_cpu', dest='compute_voxel_grid_on_cpu', action='store_true')
    parser.add_argument('--channelName', default=None, type=str)
    parser.set_defaults(compute_voxel_grid_on_cpu=False)

    set_inference_options(parser)

    args = parser.parse_args()
    argsDict = vars(args)
    argsDict.update(kwargs)

    # Load model
    model = load_model(args.path_to_model)
    device = get_device(args.use_gpu)
    model = model.to(device)
    model.eval()

    path_to_events = args.input_file
    container = importAe(filePathOrName=path_to_events, **kwargs)

    channelName = args.channelName
    if channelName is not None:
        dvs = container['data'][channelName]['dvs']
    else:
        # Use Container functionality to find the right channel
        containerObj = Container(container)
        dvs = containerObj.getDataType('dvs')

    width = dvs.get('dimX', np.max(dvs['x']) + 1)
    height = dvs.get('dimY', np.max(dvs['y']) + 1)

    reconstructor = ImageReconstructor(model, height, width, model.num_bins, args)

    """ Read chunks of events using Pandas """

    # Loop through the events and reconstruct images
    N = args.window_size
    if not args.fixed_duration:
        if N is None:
            N = int(width * height * args.num_events_per_pixel)
            print('Will use {} events per tensor (automatically estimated with num_events_per_pixel={:0.2f}).'
                  .format(N, args.num_events_per_pixel))
        else:
            print('Will use {} events per tensor (user-specified)'.format(N))
            mean_num_events_per_pixel = float(N) / float(width * height)
            if mean_num_events_per_pixel < 0.1:
                print('!!Warning!! the number of events used ({}) seems to be low compared to the sensor size. '
                      'The reconstruction results might be suboptimal.'.format(N))
            elif mean_num_events_per_pixel > 1.5:
                print('!!Warning!! the number of events used ({}) seems to be high compared to the sensor size. '
                      'The reconstruction results might be suboptimal.'.format(N))

    initial_offset = args.skipevents
    sub_offset = args.suboffset
    start_index = initial_offset + sub_offset

    if args.compute_voxel_grid_on_cpu:
        print('Will compute voxel grid on CPU.')

    if args.fixed_duration:
        event_window_iterator = FixedDurationEventReader(dvs,
                                                         duration_ms=args.window_duration,
                                                         start_index=start_index)
    else:
        event_window_iterator = FixedSizeEventReader(dvs, num_events=N, start_index=start_index)

    print('Sensor size: {} x {}'.format(width, height))

    with Timer('Processing entire dataset'):
        for event_window in event_window_iterator:
            last_timestamp = event_window[-1, 0]

            with Timer('Building event tensor'):
                if args.compute_voxel_grid_on_cpu:
                    event_tensor = events_to_voxel_grid(event_window,
                                                        num_bins=model.num_bins,
                                                        width=width,
                                                        height=height)
                    event_tensor = torch.from_numpy(event_tensor)
                else:
                    event_tensor = events_to_voxel_grid_pytorch(event_window,
                                                                num_bins=model.num_bins,
                                                                width=width,
                                                                height=height,
                                                                device=device)

            num_events_in_window = event_window.shape[0]
            reconstructor.update_reconstruction(event_tensor,
                                                start_index + num_events_in_window,
                                                last_timestamp)

            start_index += num_events_in_window
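# Because run_reconstruction() merges **kwargs into the parsed arguments and
# also forwards them to importAe, it can be driven programmatically as well as
# from the command line. A hypothetical call (the input file name below is a
# placeholder, not a file shipped with this repository):
if __name__ == '__main__':
    run_reconstruction(path_to_model='pretrained/firenet_1000.pth.tar',
                       input_file='data/example_recording.aedat4',
                       channelName=None,       # let Container pick the dvs channel
                       fixed_duration=True,
                       window_duration=33.33)  # roughly 30 reconstructed frames per second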
from utils.loading_utils import load_model, get_device
import torch
from model.model import *
from torch2trt import torch2trt

path = './pretrained/firenet_1000.pth.tar'
model = load_model(path)

# Inference only: freeze all weights
for x in model.parameters():
    x.requires_grad = False

device = get_device('True')  # any truthy value selects the GPU when available
model = model.to(device)
model.half()
model.eval()

# Dummy FP16 inputs for a 240x320 sensor: a 5-bin event voxel grid
# plus the two recurrent state tensors of the network
curr_state = torch.zeros((1, 5, 240, 320), dtype=torch.float16).to(device)
prev_state1 = torch.zeros((1, 16, 240, 320), dtype=torch.float16).to(device)
prev_state2 = torch.zeros((1, 16, 240, 320), dtype=torch.float16).to(device)

prev_state = [prev_state1, prev_state2]
state = [curr_state, prev_state1, prev_state2]
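# The conversion call itself is not part of the snippet above. A sketch of how
# the prepared tensors could be handed to torch2trt, assuming the network's
# forward() has been adapted to take the three tensors in `state` as separate
# positional inputs (which is what the flattened list suggests):
model_trt = torch2trt(model, state, fp16_mode=True)

# The optimized engine can be saved and later reloaded via torch2trt.TRTModule
torch.save(model_trt.state_dict(), './pretrained/firenet_1000_trt.pth')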