Example #1
    def __init__(self, mode, distributed, dataset, data_format, seed=None):

        if mode not in ['train', 'inference', 'iotest']:
            raise Exception("Larcv Fetcher can't handle mode ", mode)

        random_access_mode = dataset.access_mode

        if random_access_mode != "serial_access" and mode == "inference":
            logger.warn("Using random blocks in inference - possible bug!")

        if distributed:
            from larcv import distributed_queue_interface
            self._larcv_interface = distributed_queue_interface.queue_interface(
                random_access_mode=random_access_mode, seed=seed)
        else:
            from larcv import queueloader
            self._larcv_interface = queueloader.queue_interface(
                random_access_mode=random_access_mode, seed=seed)

        self.mode = mode
        self.image_mode = data_format
        self.input_dimension = dataset.dimension
        self.distributed = distributed

        self.writer = None
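
The constructor above only builds the queue interface; a data manager still has to be prepared before any batches can be fetched. A minimal sketch of that step, assuming a hypothetical filler configuration file and producer names (the prepare_manager signature matches the one used in Examples #3 and #7 below):

from collections import OrderedDict
from larcv import queueloader

# Hypothetical filler configuration; 'TrainIO', 'train.cfg' and the data/label
# producer names are placeholders, not taken from the example above.
io_config = {
    'filler_name': 'TrainIO',
    'filler_cfg':  'train.cfg',
    'verbosity':   1,
    'make_copy':   True
}
data_keys = OrderedDict({'image': 'data', 'label': 'label'})

# The same call the fetcher's _larcv_interface would receive:
interface = queueloader.queue_interface(random_access_mode="random_blocks", seed=0)
interface.prepare_manager('train', io_config, 32, data_keys)
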
Example #2
    def __init__(self,
                 mode,
                 distributed,
                 image_mode,
                 label_mode,
                 input_dimension,
                 seed=None):

        if mode not in ['train', 'inference', 'iotest']:
            raise Exception("Larcv Fetcher can't handle mode ", mode)

        if mode == "inference":
            random_access_mode = "serial_access"
        else:
            random_access_mode = "random_blocks"

        if distributed:
            from larcv import distributed_queue_interface
            self._larcv_interface = distributed_queue_interface.queue_interface(
                random_access_mode=random_access_mode, seed=seed)
        else:
            from larcv import queueloader
            self._larcv_interface = queueloader.queue_interface(
                random_access_mode=random_access_mode, seed=seed)

        self.mode = mode
        self.image_mode = image_mode
        self.label_mode = label_mode
        self.input_dimension = input_dimension

        self.writer = None
Example #3

def create_interface_object(args):

    config = build_config_file(args)

    if args.distributed:
        if args.io_mode == 'queue':
            larcv_interface = distributed_queue_interface.queue_interface(
                random_access_mode=args.event_order)
        else:
            larcv_interface = distributed_larcv_interface.thread_interface()
    else:
        if args.io_mode == 'queue':
            larcv_interface = queueloader.queue_interface(
                random_access_mode=args.event_order)
        else:
            larcv_interface = threadloader.thread_interface()

    # Generate a named temp file:
    main_file = tempfile.NamedTemporaryFile(mode='w', delete=False)
    main_file.write(config)

    main_file.close()

    # Prepare data managers:
    io_config = {
        'filler_name': 'IOTest',
        'filler_cfg': main_file.name,
        'verbosity': 5,
        'make_copy': True
    }

    # By default, fetching data and label as the keywords from the file:
    data_keys = OrderedDict({'image': 'data', 'label': 'label'})

    if args.distributed:
        if args.io_mode == 'queue':
            larcv_interface.prepare_manager('primary',
                                            io_config,
                                            COMM.Get_size() *
                                            args.local_batch_size,
                                            data_keys,
                                            color=0)
        else:
            larcv_interface.prepare_manager(
                'primary', io_config,
                COMM.Get_size() * args.local_batch_size, data_keys)
    else:
        # In the non-distributed case the queue and thread interfaces take
        # the same arguments, so both io modes issue the identical call:
        larcv_interface.prepare_manager('primary', io_config,
                                        args.local_batch_size, data_keys)

    return larcv_interface
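
create_interface_object only reads a handful of attributes from args, so it can be exercised with a plain namespace. A minimal usage sketch with placeholder values (build_config_file may require additional attributes not shown here):

from argparse import Namespace

# Placeholder arguments; event_order should be one of the larcv random-access
# modes seen elsewhere in these examples, e.g. "serial_access" or "random_blocks".
args = Namespace(
    distributed=False,
    io_mode='queue',
    event_order='serial_access',
    local_batch_size=8,
)

larcv_interface = create_interface_object(args)
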
Example #4
    def __init__(self):
        # Rely on the base class for most standard parameters, only
        # search for parameters relevant for distributed computing here

        # Put the IO rank as the last rank in the COMM, since rank 0 does tf saves
        root_rank = hvd.size() - 1

        if FLAGS.COMPUTE_MODE == "GPU":
            os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())


        self._larcv_interface = queue_interface()
        self._iteration       = 0
        self._rank            = hvd.rank()
        self._cleanup         = []
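
This constructor assumes Horovod has already been initialized before the object is built. A minimal sketch of the surrounding setup, keeping the names used in the snippet (hvd, queue_interface); the TensorFlow Horovod binding and the distributed_queue_interface import are assumptions, since rank 0 is said to handle the tf saves:

import os
import horovod.tensorflow as hvd   # assumption: TensorFlow binding
from larcv.distributed_queue_interface import queue_interface  # assumption

hvd.init()

# Pin one visible GPU per local rank, as in the constructor above:
os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
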
Example #5

    def __init__(self, distributed, dataset, seed=0):

        random_access_mode = dataset.access_mode

        if distributed:
            from larcv import distributed_queue_interface
            self._larcv_interface = distributed_queue_interface.queue_interface(
                random_access_mode=random_access_mode.name, seed=seed)
        else:
            from larcv import queueloader
            self._larcv_interface = queueloader.queue_interface(
                random_access_mode=random_access_mode.name, seed=seed)

        self.distributed = distributed
        self.dataset = dataset

        self.writer = None
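
Here dataset.access_mode is read via .name, which implies an enum-like configuration object rather than a plain string. A minimal sketch of what such a dataset object could look like; the enum and dataclass below are assumptions for illustration, not part of the larcv API:

from enum import Enum
from dataclasses import dataclass

class RandomMode(Enum):
    serial_access = 0
    random_blocks = 1

@dataclass
class DatasetConfig:
    access_mode: RandomMode = RandomMode.random_blocks
    # ... plus whatever else the surrounding application reads from `dataset`

# DatasetConfig().access_mode.name == "random_blocks", matching the strings
# passed as random_access_mode in the other examples.
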
Example #6
    def __init__(self,
                 mode,
                 distributed,
                 downsample,
                 dataformat,
                 synthetic,
                 sparse,
                 seed=None):

        if mode not in ['train', 'inference', 'iotest']:
            raise Exception("Larcv Fetcher can't handle mode ", mode)

        if not synthetic:

            if distributed:
                from larcv import distributed_queue_interface
                self._larcv_interface = distributed_queue_interface.queue_interface()
            else:
                from larcv import queueloader
                if mode == "inference":
                    self._larcv_interface = queueloader.queue_interface(
                        random_access_mode="serial_access", seed=seed)
                else:
                    # mode is "train" or "iotest", validated above:
                    self._larcv_interface = queueloader.queue_interface(
                        random_access_mode="random_blocks", seed=seed)
        else:
            # No larcv interface is needed for synthetic data:
            self._larcv_interface = None

        self.mode = mode
        self.downsample = downsample
        self.dataformat = dataformat
        self.synthetic = synthetic
        self.sparse = sparse

        self.writer = None

        # Compute the realized image shape:
        self.full_image_shape = [
            self.FULL_RESOLUTION_H, self.FULL_RESOLUTION_W
        ]
        self.ds = 2**downsample

        self.image_shape = [int(i / self.ds) for i in self.full_image_shape]
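
The realized image shape is just the full resolution divided by 2**downsample. For example, with a hypothetical full resolution of 1280 x 2048 and downsample=1:

FULL_RESOLUTION_H, FULL_RESOLUTION_W = 1280, 2048   # placeholder values
downsample = 1

ds = 2 ** downsample
image_shape = [int(i / ds) for i in [FULL_RESOLUTION_H, FULL_RESOLUTION_W]]
# image_shape == [640, 1024]
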
Example #7
def load_data():
    ''' initialize data loading '''
    # config files
    main_fname = os.environ['HOME'] + '/bb2nu_rl_dnn/larcvconfig_train_lr.txt'
    aux_fname = os.environ['HOME'] + '/bb2nu_rl_dnn/larcvconfig_test_lr.txt'
    # initialize io
    root_rank = hvd.size() - 1
    #_larcv_interface = queue_interface( random_access_mode="serial_access" )
    #_larcv_interface = queue_interface( random_access_mode="random_events" )
    _larcv_interface = queue_interface(random_access_mode="random_blocks")

    # Prepare data managers:
    io_config = {
        'filler_name': 'TrainIO',
        'filler_cfg': main_fname,
        'verbosity': 1,
        'make_copy': True
    }
    aux_io_config = {
        'filler_name': 'TestIO',
        'filler_cfg': aux_fname,
        'verbosity': 1,
        'make_copy': True
    }
    # Build up the data_keys:
    data_keys = OrderedDict()
    data_keys['image'] = 'data'
    data_keys['label'] = 'label'
    aux_data_keys = OrderedDict()
    aux_data_keys['image'] = 'test_data'
    aux_data_keys['label'] = 'test_label'

    _larcv_interface.prepare_manager('train',
                                     io_config,
                                     global_batch_size,
                                     data_keys,
                                     color=0)
    _larcv_interface.prepare_manager('test',
                                     aux_io_config,
                                     global_batch_size,
                                     aux_data_keys,
                                     color=0)

    return _larcv_interface
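
Once the 'train' and 'test' managers are prepared, batches are pulled from the queue by name. A minimal sketch of the read loop; prepare_next and fetch_minibatch_data are assumptions about the larcv queue interface that are not shown in the example above, so check the installed larcv version for the exact method names and signatures:

_larcv_interface = load_data()

for step in range(10):
    # Queue up the next set of events, then fetch the current one.
    _larcv_interface.prepare_next('train')
    batch = _larcv_interface.fetch_minibatch_data('train', pop=True)
    images, labels = batch['image'], batch['label']
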
Example #8
    def __init__(self):
        # Rely on the base class for most standard parameters, only
        # search for parameters relevant for distributed computing here

        # Put the IO rank as the last rank in the COMM, since rank 0 does tf saves
        root_rank = hvd.size() - 1 

        if self.args.COMPUTE_MODE == "GPU":
            os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
            

        self._larcv_interface = queue_interface()  # read_option='read_from_single_local_rank'
        self._iteration       = 0
        self._rank            = hvd.rank()
        self._cleanup         = []
        self._global_step     = torch.as_tensor(-1)

        if self._rank == 0:
            self.args.dump_config()
Example #9
    def __init__(self, distributed, seed=None, inference=False):

        self._cleanup = []
        self._eventID_labels   = {}
        self._eventID_energies = {}

        if inference:
            random_access_mode = "serial_access"
        else:
            random_access_mode = "random_blocks"

        self._color = None
        if distributed:
            from larcv import distributed_queue_interface
            self._larcv_interface = distributed_queue_interface.queue_interface(random_access_mode=random_access_mode)
            self._color = 0
        else:
            from larcv import queueloader
            self._larcv_interface = queueloader.queue_interface(random_access_mode=random_access_mode, seed=seed)

        self.inference = inference