Code example #1
    def __init__(self,
                 broker_address,
                 larcv_supera_file,
                 output_larcv_filename,
                 adc_producer="wire",
                 chstatus_producer="wire",
                 tick_backwards=False,
                 infill_tree_name="infill",
                 **kwargs):
        """
        """
        super(UBInfillClient, self).__init__(broker_address, **kwargs)

        # setup the input and output larcv iomanager, input larlite manager
        tick_direction = larcv.IOManager.kTickForward
        if tick_backwards:
            tick_direction = larcv.IOManager.kTickBackward
        self._inlarcv = larcv.IOManager(larcv.IOManager.kREAD, "",
                                        tick_direction)
        self._inlarcv.add_in_file(larcv_supera_file)
        self._inlarcv.initialize()

        self._outlarcv = larcv.IOManager(larcv.IOManager.kWRITE)
        self._outlarcv.set_out_file(output_larcv_filename)
        self._outlarcv.initialize()
        self._log = logging.getLogger(__name__)

        FixedCROIFromFlash = ublarcvapp.UBSplitDetector
        self._infill_tree_name = infill_tree_name
        self._adc_producer = adc_producer
        self._chstatus_producer = chstatus_producer

        self._ubsplitdet = None
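The constructors in this example and the ones that follow share the same IOManager setup: a read-mode manager, with the tick ordering chosen to match the input file, plus a write-mode manager for the output. A minimal stand-alone sketch of that pattern, with placeholder file names:

from larcv import larcv

def make_io_pair(input_file, output_file, tick_backwards=False):
    # pick the tick ordering expected by the input file
    tick_direction = larcv.IOManager.kTickForward
    if tick_backwards:
        tick_direction = larcv.IOManager.kTickBackward
    # read-mode manager for the input supera file
    inlarcv = larcv.IOManager(larcv.IOManager.kREAD, "", tick_direction)
    inlarcv.add_in_file(input_file)
    inlarcv.initialize()
    # write-mode manager for the output file
    outlarcv = larcv.IOManager(larcv.IOManager.kWRITE)
    outlarcv.set_out_file(output_file)
    outlarcv.initialize()
    return inlarcv, outlarcv

# hypothetical file names
# inlarcv, outlarcv = make_io_pair("supera.root", "infill_out.root")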
Code example #2
File: UBSSNetClient.py Project: LArbys/ublarcvserver
    def __init__(self,
                 broker_address,
                 larcv_supera_file,
                 output_larcv_filename,
                 larlite_opreco_file=None,
                 apply_opflash_roi=True,
                 adc_producer="wire",
                 opflash_producer="simpleFlashBeam",
                 tick_backwards=False,
                 ssnet_tree_name="ssnet",
                 intimewin_min_tick=190,
                 intimewin_max_tick=320,
                 **kwargs):
        """
        """
        super(UBSSNetClient, self).__init__(broker_address, **kwargs)

        # setup the input and output larcv iomanager, input larlite manager
        tick_direction = larcv.IOManager.kTickForward
        if tick_backwards:
            tick_direction = larcv.IOManager.kTickBackward
        self._inlarcv = larcv.IOManager(larcv.IOManager.kREAD, "",
                                        tick_direction)
        self._inlarcv.add_in_file(larcv_supera_file)
        self._inlarcv.initialize()

        LArliteManager = ublarcvapp.LArliteManager
        self._inlarlite = None
        if larlite_opreco_file is not None:
            self._inlarlite = LArliteManager(larlite.storage_manager.kREAD)
            self._inlarlite.add_in_filename(larlite_opreco_file)
            self._inlarlite.open()
            #self._inlarlite.set_verbosity(0)

        self._outlarcv = larcv.IOManager(larcv.IOManager.kWRITE)
        self._outlarcv.set_out_file(output_larcv_filename)
        self._outlarcv.initialize()
        self._log = logging.getLogger(__name__)

        FixedCROIFromFlash = ublarcvapp.ubdllee.FixedCROIFromFlashAlgo
        self._ssnet_tree_name = ssnet_tree_name
        self._adc_producer = adc_producer
        self._opflash_producer = opflash_producer
        self._apply_opflash_roi = apply_opflash_roi
        if self._apply_opflash_roi:
            self._croi_fromflash_algo = FixedCROIFromFlash()
            self._intimewin_min_tick = intimewin_min_tick
            self._intimewin_max_tick = intimewin_max_tick
        else:
            self._ubsplitdet = None
Code example #3
 def __init__(self,
              identity,
              inputfile,
              ipaddress,
              load_func,
              batchsize=None,
              verbosity=0,
              queuesize=1,
              randomaccess=True):
     super(LArCVServerWorker, self).__init__(identity,
                                             ipaddress,
                                             verbosity=verbosity,
                                             queuesize=queuesize)
     self.inputfile = inputfile
     self.io = larcv.IOManager(larcv.IOManager.kREAD)
     self.io.add_in_file(self.inputfile)
     self.io.initialize()
     self.nentries = self.io.get_n_entries()
     self.batchsize = batchsize
     self.products = {}
     self.compression_level = 4
     self.print_msg_size = False
     self.num_reads = 0
     self.current_idx = 0
     self.randomaccess = randomaccess
     self.load_func = load_func
     if not callable(self.load_func.loadevent):
         raise ValueError(
             "'load_func' argument needs to be a function returning a dict of numpy arrays"
         )
     print "LArCVServerWorker[{}] is loaded.".format(self._identity)
Code example #4
def prepare(input_files, output_file):
    """
    Prepares larcv IO manager.
    input_files is a string list of input data files
    output_file is a string name of output file
    return is larcv.IOManager instance pointer
    """
    outfile = output_file
    infile = "["
    for fname in input_files:
        infile += '"%s",' % fname
    infile = infile.rstrip(',')
    infile += ']'

    cfg = '''
    IOManager: {
      IOMode: 2
      OutFileName: "%s"
      InputFiles: %s
    }
    '''
    cfg = cfg % (outfile, str(infile))
    cfg_file = tempfile.NamedTemporaryFile('w')
    cfg_file.write(cfg)
    cfg_file.flush()
    from larcv import larcv
    io = larcv.IOManager(cfg_file.name)
    io.initialize()
    return io
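A possible call to prepare(), with placeholder file names; since the configuration uses IOMode 2 (read and write), the returned manager can be read entry by entry and then finalized:

# hypothetical file names
io = prepare(["input_a.root", "input_b.root"], "merged_out.root")
for entry in range(io.get_n_entries()):
    io.read_entry(entry)
    # ... fetch products here with io.get_data(...)
io.finalize()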
Code example #5
def count_2d(file_name, product, producer):
    io = larcv.IOManager()
    io.add_in_file(file_name)
    io.initialize()
    voxel_counts = numpy.zeros((io.get_n_entries(), 3))

    for i in range(io.get_n_entries()):
        io.read_entry(i)
        image = larcv.EventSparseTensor2D.to_sparse_tensor(io.get_data("sparse2d", producer))
        for plane in [0,1,2]:
            voxel_counts[i][plane] = image.as_vector()[plane].size()
            meta = image.as_vector()[plane].meta()
        # image3d = io.get_data("sparse3d", "sbndvoxels")
        # voxel_counts3d[i] = image3d.as_vector().size()

        if i % 1000 == 0:
            print("On entry ", i, " of ", io.get_n_entries())

        if i > 100:
            break

    print ("Average Voxel Occupation: ")
    for p in [0,1,2]:
        print("  {p}: {av:.2f} +/- {rms:.2f} ({max} max)".format(
            p   = p, 
            av  = numpy.mean(voxel_counts[:,p]), 
            rms = numpy.std(voxel_counts[:,p]), 
            max = numpy.max(voxel_counts[:,p])
            )
        )
    print("Image shapes in dense representation are: ")
    for p in [0,1,2]:
        print(image.as_vector()[p].meta().dump())
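A possible call, with placeholder file and producer names; note that count_2d reads the "sparse2d" product directly, so the product argument is effectively unused:

# hypothetical file and producer names
count_2d("sparse_events.root", "sparse2d", "sbndwire")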
Code example #6
 def __init__(self, *args):
     self.iom = larcv.IOManager(0)
     for each in args:
         self.iom.add_in_file(each)
     self.iom.initialize()
     self.counter = 1
     super(UbooneDataset, self).__init__()
Code example #7
    def __init__(self, inputfile, products, store_eventids=False, randomize=False ):

        super(LArCV1Dataset,self).__init__()

        # inputs
        # cfgfile: path to configuration. see test.py.ipynb for example of configuration
        self.inputfiles = []
        if type(inputfile) is str:
            self.inputfiles.append( inputfile )
        elif type(inputfile) is list:
            self.inputfiles = inputfile
        else:
            raise ValueError("LArCV1Dataset inputfile variable should be str or list. is {}".format(type(inputfile)))

        if type(products) is not list:
            raise ValueError("LArCV1Dataset 'products' variable should be a list with (product type,tree name)")
        self.products = products

        self.store_eventids = store_eventids
        self.randomize = randomize

        self.io = larcv.IOManager( larcv.IOManager.kREAD )
        for f in self.inputfiles:
            self.io.add_in_file( f )
        self.io.initialize()

        self.nentries = self.io.get_n_entries()

        # batch info, not set until later
        self._batch_size = None

        self.current_entry = 0
        self.delivered = 0
        self.permuted = None
        return
Code example #8
def convert_training_set(top_input_path, output_path, glob_filter="*.h5"):
    files = glob.glob(top_input_path + glob_filter)

    n_files = len(files)

    # # Open the input file:
    training_file_index = int(0.75 * n_files)
    testing_file_index = int(0.825 * n_files)

    file_dictionary = {
        "train": files[:training_file_index],
        "test": files[training_file_index:testing_file_index],
        "val": files[testing_file_index:-1]
    }

    for mode in ['train', 'test', 'val']:

        if mode == 'test': continue
        if mode == 'val': continue

        output = output_path + "next_new_classification_{}.h5".format(mode)
        io_manager = larcv.IOManager(larcv.IOManager.kWRITE)
        io_manager.set_out_file(output)
        io_manager.initialize()

        for f in file_dictionary[mode]:
            convert_file(io_manager, f, is_mc=True)

        io_manager.finalize()
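Because of the two continue statements, only the 'train' split is actually converted. A possible call, with placeholder directories (the glob_filter default of "*.h5" is kept):

# hypothetical paths; output_path should end with the desired directory separator
convert_training_set("/data/raw_h5/", "/data/larcv_out/")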
Code example #9
File: dlevent.py Project: LArbys/lardly
    def _load_io_managers(self, fileset):
        self._lcvio = larcv.IOManager(larcv.IOManager.kREAD, "ioforward",
                                      larcv.IOManager.kTickForward)
        self._lcvio_backward = larcv.IOManager(larcv.IOManager.kREAD,
                                               "iobackward",
                                               larcv.IOManager.kTickBackward)
        self._llio = larlite.storage_manager(larlite.storage_manager.kREAD)

        nlcvio = 0
        nlcvio_backward = 0
        nllio = 0

        for ifile, fname in enumerate(fileset["larcv"]):
            if fileset["tickbackward"][ifile]:
                self._lcvio_backward.add_in_file(fname)
                nlcvio_backward += 1
            else:
                self._lcvio.add_in_file(fname)
                nlcvio += 1

        for fname in fileset["larlite"]:
            self._llio.add_in_filename(fname)
            nllio += 1

        if nlcvio:
            self._lcvio.initialize()
        else:
            self._lcvio = None

        if nlcvio_backward:
            self._lcvio_backward.initialize()
        else:
            self._lcvio_backward = None

        if nllio:
            self._llio.open()
        else:
            self._llio = None
Code example #10
def main():
    directory = "example_files/"
    # for _file in ["NC_larcv_dev.h5", "beam_larcv_dev.h5", "nueCC_larcv_dev.h5", "numuCC_larcv_dev.h5"]:
    for _file in [
            "anatree_beam.h5",
    ]:
        iom = larcv.IOManager(larcv.IOManager.kREAD)
        iom.add_in_file(directory + _file)
        iom.initialize()

        for entry in range(iom.get_n_entries()):
            iom.read_entry(entry)
            neutrino = larcv.EventParticle.to_particle(
                iom.get_data("particle", "neutrino"))
            particles = larcv.EventParticle.to_particle(
                iom.get_data("particle", "segment"))
            clusters = larcv.EventSparseCluster2D.to_sparse_cluster(
                iom.get_data("cluster2d", "segment"))
            clusters3D = larcv.EventSparseCluster3D.to_sparse_cluster(
                iom.get_data("cluster3d", "segment"))
            x_clusters = clusters.as_vector()[0]
            y_clusters = clusters.as_vector()[1]
            z_clusters = clusters.as_vector()[2]
            # print()
            # if (particles.size() < 25):
            #     print(entry)
            #     print(particles.size())
            # print(x_clusters.size())
            # print(y_clusters.size())
            # print(z_clusters.size())

            # for i in range(particles.size()):
            #     particle = particles.as_vector()[i]
            #     x_pix = x_clusters.as_vector()[i]
            #     y_pix = y_clusters.as_vector()[i]
            #     z_pix = z_clusters.as_vector()[i]
            #     print("ID {ID} is {pdg} by {parent}, process {proc}, E={energy} ".format(
            #         ID  = particle.track_id(),
            #         pdg = particle.pdg_code(),
            #         parent = particle.parent_track_id(),
            #         energy = particle.energy_init(),
            #         proc   = particle.creation_process()
            #         ))

            #     print("--",x_pix.size(), numpy.sum(x_pix.values()))
            #     print("--",y_pix.size(), numpy.sum(x_pix.values()))
            #     print("--",z_pix.size(), numpy.sum(x_pix.values()))

            print(_file, neutrino.as_vector().front().pdg_code())
            print(_file, neutrino.as_vector().front().nu_current_type())
Code example #11
File: ubhiresdata.py Project: LArbys/ubhires
    def _setupBranches(self, config):
        """
        load the larcv iomanager
        """
        print "IOMANAGER/BRANCH SETUP"

        self.ioman = larcv.IOManager(larcv.IOManager.kREAD, "IO")

        with open(self.config["filelist"], 'r') as f:
            lines = f.readlines()
            for l in lines:
                l = l.strip()
                self.ioman.add_in_file(l)
        self.ioman.initialize()
Code example #12
File: ubhiresdata.py Project: LArbys/ubhires
    def setup(self, bottom, top):
        """
        seems to be a required method for a PythonDataLayer
        """

        # get parameters
        params = eval(self.param_str)
        with open(params['configfile'], 'r') as f:
            self.config = yaml.load(f)

        self.batch_size = self.config["batch_size"]
        self._setupBranches(self.config)

        meanio = larcv.IOManager(larcv.IOManager.kREAD, "IOmean")
        meanio.add_in_file(self.config["meanfile"])
        meanio.initialize()
        mean_evtimg = meanio.get_data(larcv.kProductImage2D, "mean")
        self.nchannels = int(mean_evtimg.Image2DArray().size())
        self.width = int(mean_evtimg.Image2DArray().at(0).meta().cols())
        self.height = int(mean_evtimg.Image2DArray().at(0).meta().rows())
        self.mean_img = np.zeros((self.nchannels, self.width, self.height),
                                 dtype=np.float)
        for ch, img2d in enumerate(mean_evtimg.Image2DArray()):
            self.mean_img[ch, ...] = larcv.as_ndarray(img2d)[...]

        # set the blob sizes I guess
        data_shape = (self.batch_size, self.nchannels, self.width, self.height)
        label_shape = (self.batch_size, )
        eventid_shape = (self.batch_size, 5)
        top[0].reshape(*data_shape)
        top[1].reshape(*label_shape)
        top[2].reshape(*eventid_shape)

        # depending on the run mode, we setup the queue
        self.event_queue = Queue()
        if self.config["run_mode"] in ["sequential", "randomize"]:
            # setup the queue
            self.event_thread = Thread(target=fill_event_queue,
                                       args=(self.ioman, self.mean_img,
                                             self.event_queue,
                                             self.batch_size * 2, self.config))
            self.event_thread.setDaemon(True)
            self.event_thread.start()
        elif self.config["run_mode"] == "selection":
            self.batch_size = 1
        else:
            raise ValueError(
                "unrecognized run_mode. either [sequential,randomize,selection]"
            )
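The setup() method above reads a YAML configuration holding at least the keys referenced in the code. A hedged sketch of writing such a configuration from Python, with placeholder values:

import yaml

# placeholder values; only the keys read by setup()/_setupBranches() are shown
config = {
    "batch_size": 16,                 # size of each blob/batch
    "filelist": "train_files.txt",    # plain-text list, one larcv ROOT file per line
    "meanfile": "mean_image.root",    # file holding the Image2D product named "mean"
    "run_mode": "randomize",          # one of: sequential, randomize, selection
}
with open("ubhires_config.yaml", "w") as f:
    yaml.dump(config, f)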
Code example #13
def convert_data_set(top_input_path, output_path, glob_filter="*.h5"):
    files = glob.glob(top_input_path + glob_filter)

    print(files)
    n_files = len(files)

    # Each data file is processed independently
    for f in files:

        output = os.path.basename(f.replace(".h5", "_larcv.h5"))
        output = output_path + "/" + output
        io_manager = larcv.IOManager(larcv.IOManager.kWRITE)
        io_manager.set_out_file(output)
        io_manager.initialize()

        convert_file(io_manager, f, is_mc=False)

        io_manager.finalize()
Code example #14
    def __init__( self,identity,inputfiles,ipaddress,load_func,
                  seed=None,batchsize=None,verbosity=0,tickbackward=False,
                  fetch_ntries=100,func_params={},readonly_products=None):
        super( LArCVServerWorker, self ).__init__(identity,ipaddress,verbosity=verbosity)

        self.tickorder = larcv.IOManager.kTickForward
        if tickbackward:
            self.tickorder = larcv.IOManager.kTickBackward

        if type(inputfiles)==str:
            self.inputfiles = [inputfiles]
        elif type(inputfiles)==list:
            self.inputfiles = inputfiles
        else:
            raise ValueError("'inputfile' must be type 'str' or 'list of str'")
            
        self.io = larcv.IOManager(larcv.IOManager.kREAD,"",self.tickorder)
        for f in self.inputfiles:
            self.io.add_in_file(f)

        if readonly_products is not None:
            if type(readonly_products) is not list and type(readonly_products) is not tuple:
                raise ValueError("readonly_products argument should be a list or tuple of (\"name\",larcv type) pairs")
            for product in readonly_products:
                if len(product)<2 or type(product[1]) is not int or type(product[0]) is not str:
                    raise ValueError("readonly_products argument should be a list or tuple of (\"name\",larcv type) pairs")
                self.io.specify_data_read( product[1], product[0] )
            
        self.io.initialize()
        
        self.nentries = self.io.get_n_entries()
        self.batchsize = batchsize
        self.compression_level = 4
        self.print_msg_size = False
        self.num_reads    = 0
        self.load_func    = load_func
        self.func_params  = func_params
        self.seed         = seed
        self.fetch_ntries = fetch_ntries
        np.random.seed(seed=self.seed)

        if not callable(self.load_func):
            raise ValueError("'load_func' argument needs to be a function returning a dict of numpy arrays")
        print "LArCVServerWorker[{}] is loaded.".format(self._identity)
Code example #15
def count_3d(file_name, product, producer):
    io = larcv.IOManager()
    io.add_in_file(file_name)
    io.initialize()
    voxel_counts3d = numpy.zeros((io.get_n_entries(), 1))
    for i in range(io.get_n_entries()):
        io.read_entry(i)
        image3d = larcv.EventSparseTensor3D.to_sparse_tensor(
            io.get_data("sparse3d", "sbndvoxels"))
        voxel_counts3d[i] = image3d.as_vector()[0].size()

        if i % 100 == 0:
            print("On entry ", i, " of ", io.get_n_entries())

        # if i > 100:
        #     break
    print(" 3D: {av:.2f} +/- {rms:.2f} ({max} max)".format(
        av=numpy.mean(voxel_counts3d[:]),
        rms=numpy.std(voxel_counts3d[:]),
        max=numpy.max(voxel_counts3d[:])))
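A possible call, with a placeholder file name; the producer is hard-coded to "sbndvoxels" inside the function, so the product and producer arguments are not actually used:

# hypothetical file name
count_3d("sparse_events.root", "sparse3d", "sbndvoxels")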
Code example #16
 def __init__(self,
              identity,
              inputfile,
              ipaddress,
              load_func,
              batchsize=None,
              verbosity=0,
              io_tickbackward=True,
              func_params={}):
     super(LArCVServerWorker, self).__init__(identity,
                                             ipaddress,
                                             verbosity=verbosity)
     if type(inputfile) == str:
         self.inputfile = [inputfile]
     elif type(inputfile) != list:
         raise ValueError(
             "input variable 'inputfile' must be type 'str' or 'list' of 'str'"
         )
     else:
         self.inputfile = inputfile
     print "inputfile: ", self.inputfile, " type=", type(self.inputfile)
     tickorder = larcv.IOManager.kTickForward
     if io_tickbackward:
         tickorder = larcv.IOManager.kTickBackward
     self.io = larcv.IOManager(larcv.IOManager.kREAD, "", tickorder)
     for f in self.inputfile:
         self.io.add_in_file(f)
     self.io.initialize()
     self.nentries = self.io.get_n_entries()
     self.batchsize = batchsize
     self.products = {}
     self.compression_level = 4
     self.print_msg_size = False
     self.num_reads = 0
     self.load_func = load_func
     self.func_params = func_params
     if not callable(self.load_func):
         raise ValueError(
             "'load_func' argument needs to be a function returning a dict of numpy arrays"
         )
     print "LArCVServerWorker[{}] is loaded.".format(self._identity)
Code example #17
    def __init__(self, filename):
        object.__init__(self)


        self._io_manager = larcv.IOManager()
        self._io_manager.add_in_file(filename)
        self._io_manager.initialize()

        n_entries = self._io_manager.get_n_entries()

        self._total_accuracy = [
            numpy.zeros([n_entries]),
            numpy.zeros([n_entries]),
            numpy.zeros([n_entries]),
        ]

        self._active_accuracy = [
            numpy.zeros([n_entries]),
            numpy.zeros([n_entries]),
            numpy.zeros([n_entries]),
        ]

        self.dtypes = numpy.dtype([
            ('entry'      , numpy.uint32),
            ('neut'       , numpy.uint32),
            ('n_neut_true', numpy.uint32,  3),
            ('n_neut_pred', numpy.uint32,  3),
            ('neut_x_mean', numpy.float32, 3),
            ('neut_y_mean', numpy.float32, 3),
            ('neut_x_std' , numpy.float32, 3),
            ('neut_y_std' , numpy.float32, 3),
            ('accuracy'   , numpy.float32, 3),
            ('acc_neut'   , numpy.float32, 3),
            ('acc_cosm'   , numpy.float32, 3),
            ('iou_neut'   , numpy.float32, 3),
            ('iou_cosm'   , numpy.float32, 3),
            ('acc_non0'   , numpy.float32, 3),
            ('energy'     , numpy.float32),
        ])
Code example #18
    def initialize(self):
        from larcv import larcv
        from larcv.dataloader2 import larcv_threadio
        self._input_cfg = make_input_larcv_cfg(self._flags)
        cfg = {
            'filler_name': 'MainIO',
            'verbosity': 0,
            'filler_cfg': self._input_cfg.name
        }
        self._ihandler = larcv_threadio()
        self._ihandler.configure(cfg)
        self._ihandler.start_manager(self.batch_per_step())
        self._ihandler.next(store_entries=True, store_event_ids=True)
        self._next_counter = 0
        self._num_entries = self._ihandler._proc.pd().io().get_n_entries()
        self._num_channels = self._ihandler.fetch_data(
            self._flags.DATA_KEYS[0]).dim()[-1]

        if self._flags.OUTPUT_FILE:
            self._output_cfg = make_output_larcv_cfg(self._flags)
            self._fout = larcv.IOManager(self._output_cfg.name)
            self._fout.initialize()
Code example #19
    def __init__(self,larcv_input_file):

        # we setup a larcv IOManager for read mode
        self.io = larcv.IOManager( larcv.IOManager.kREAD )
        self.io.add_in_file( larcv_input_file )
        self.io.initialize()

        # we setup some image processor modules

        # split a whole image into 3D-consistent chunks
        # the module will return bounding box defintions
        # the event loop will do the slicing
        ubsplit_cfg="""
        InputProducer: \"wire\"
        OutputBBox2DProducer: \"detsplit\"
        CropInModule: false
        OutputCroppedProducer: \"detsplit\"
        BBoxPixelHeight: 512
        BBoxPixelWidth: 832
        CoveredZWidth: 310
        FillCroppedYImageCompletely: true
        DebugImage: false
        MaxImages: 1000
        RandomizeCrops: false
        MaxRandomAttempts: 1000
        MinFracPixelsInCrop: 0.0
        """
        fcfg = open("ubsplit.cfg",'w')
        print >>fcfg,ubsplit_cfg
        fcfg.close()
        split_pset = larcv.CreatePSetFromFile( "ubsplit.cfg", "UBSplitDetector" )
        self.split_algo = larcv.UBSplitDetector()
        self.split_algo.configure(split_pset)
        self.split_algo.initialize()
        self.split_algo.set_verbosity(0)        

        self._nentries = self.io.get_n_entries()
Code example #20
    def initialize(self):
        # Instantiate and configure
        if not self._cfg.MAIN_INPUT_CONFIG:
            print('Must provide larcv data filler configuration file!')
            return 
        #
        # Data IO configuration
        #
        # Main input stream
        self._input_main = larcv_threadio()
        filler_cfg = {'filler_name' : 'ThreadProcessor',
                      'verbosity'   : 0, 
                      'filler_cfg'  : self._cfg.MAIN_INPUT_CONFIG}
        self._input_main.configure(filler_cfg)
        self._input_main.start_manager(self._cfg.MINIBATCH_SIZE)

        # Test input stream (optional)
        if self._cfg.TEST_INPUT_CONFIG:
            self._input_test = larcv_threadio()
            filler_cfg = {'filler_name' : 'TestIO',
                          'verbosity'   : 0,
                          'filler_cfg'  : self._cfg.TEST_INPUT_CONFIG}
            self._input_test.configure(filler_cfg)
            self._input_test.start_manager(self._cfg.TEST_BATCH_SIZE)

        # Output stream (optional)
        if self._cfg.ANA_OUTPUT_CONFIG:
            self._output = larcv.IOManager(self._cfg.ANA_OUTPUT_CONFIG)
            self._output.initialize()

        # Retrieve image/label dimensions
        self._input_main.next(store_entries   = (not self._cfg.TRAIN),
                              store_event_ids = (not self._cfg.TRAIN))
        dim_data = self._input_main.fetch_data(self._cfg.KEYWORD_DATA).dim()
        
        return dim_data
Code example #21
def deploy_sparselarflow_on_files(larcv_outfile,
                                  larlite_outfile,
                                  filelist,
                                  weightfile,
                                  model_name="dualflow_classvec_v2",
                                  adc_producer="wire",
                                  chstatus_producer='wire',
                                  cropper_cfg="cropflow_processor.cfg",
                                  flow="dual",
                                  devicename="cpu",
                                  run_reco_flowhits=True,
                                  run_truth_flowhits=True,
                                  save_full_adc=False,
                                  save_cropped_adc=False,
                                  save_cropped_trueflow=False,
                                  run_stitcher=False,
                                  has_mc=False,
                                  threshold=10.0,
                                  maxentries=-1):

    import numpy as np
    import torch
    from larlite import larlite
    from larcv import larcv
    from ublarcvapp import ublarcvapp
    from larflow import larflow
    from ROOT import std

    from sparsemodels import load_models
    from load_cropped_sparse_dualflow import load_croppedset_sparse_dualflow_nomc

    device = torch.device(devicename)
    model = load_models(model_name, weight_file=weightfile, device=devicename)
    model.eval()

    out = larcv.IOManager(larcv.IOManager.kWRITE, "stitched")
    out.set_out_file(larcv_outfile)
    out.initialize()

    out_ll = larlite.storage_manager(larlite.storage_manager.kWRITE)
    out_ll.set_out_filename(larlite_outfile)
    out_ll.open()

    dt_tot = 0.0
    dt_net = 0.0  # running the network
    dt_data = 0.0  # preparing data (split/crop)
    dt_aten = 0.0  # turn data into torch tensors
    dt_flow = 0.0  # making flow
    dt_result = 0.0  # preparing output images

    ttot = time.time()

    # first create cfg file if does not exist
    if not os.path.exists(cropper_cfg):
        print("Writing new copper config: ", cropper_cfg)
        from crop_processor_cfg import fullsplit_processor_config
        f = open(cropper_cfg, 'w')
        f.write(fullsplit_processor_config(adc_producer, chstatus_producer))
        f.close()

    splitter = larcv.ProcessDriver("ProcessDriver")
    print("CONFIGURE SPLITTER: ", cropper_cfg)
    splitter.configure(cropper_cfg)

    # add files to iomanager
    io = splitter.io_mutable()

    if type(filelist) is str:
        filelist = [filelist]

    for inputfile in filelist:
        io.add_in_file(inputfile)

    # initialize splitter
    splitter.initialize()
    nentries = io.get_n_entries()
    if maxentries > 0 and maxentries < nentries:
        nentries = maxentries

    nimgs = 0
    nevents = 0
    for ientry in xrange(nentries):

        tdata = time.time()
        io.read_entry(ientry)
        ev_img = io.get_data(larcv.kProductImage2D, adc_producer)

        run = ev_img.run()
        subrun = ev_img.subrun()
        event = ev_img.event()

        print("[Entry {}] {}".format(ientry, (run, subrun, event)))

        adc_v = ev_img.Image2DArray()
        adc_copy_v = std.vector("larcv::Image2D")()
        for i in xrange(adc_v.size()):
            adc_copy_v.push_back(adc_v.at(i))

        splitter.process_entry(ientry, False, False)

        if run_stitcher:
            stitcher = ublarcvapp.UBSparseFlowStitcher(adc_v)

        ev_crops = io.get_data(larcv.kProductImage2D, "croppedadc")
        crop_v = ev_crops.Image2DArray()
        print("  number of crops: {}".format(crop_v.size()))

        # get sparse numpy arrays
        data = load_croppedset_sparse_dualflow_nomc(io)
        dt_data += time.time() - tdata

        # container for network output
        ev_outdualflow_v = out.get_data(larcv.kProductSparseImage,
                                        "cropdualflow")

        # torch tensors
        for iset, sparse_np in enumerate(data["pixadc"]):

            taten = time.time()

            ncoords = sparse_np.shape[0]
            print("deploy net: iset[{}] ncoords={}".format(iset, ncoords))

            # make tensor for coords (row,col,batch)
            coord_t = torch.from_numpy(sparse_np[:, 0:2].astype(
                np.int32)).to(device)

            # tensor for src pixel adcs
            srcpix_t = torch.from_numpy(sparse_np[:, 4].reshape(
                (ncoords, 1))).to(device)
            # tensor for target pixel adcs
            tarpix_flow1_t = torch.from_numpy(sparse_np[:, 2].reshape(
                (ncoords, 1))).to(device)
            if flow == 'dual':
                tarpix_flow2_t = torch.from_numpy(sparse_np[:, 3].reshape(
                    (ncoords, 1))).to(device)
            else:
                tarpix_flow2_t = None

            dt_aten += time.time() - taten

            # Run NETWORK
            tnet = time.time()
            with torch.set_grad_enabled(False):
                predict1_t, predict2_t = model(coord_t, srcpix_t,
                                               tarpix_flow1_t, tarpix_flow2_t,
                                               1)
            dt_net += time.time() - tnet
            #print("predict1_t shape",predict1_t.features.shape)

            # convert class vector output back to flow
            # find max, then subtract off source pix
            if model_name in ['dualflow_classvec_v2']:
                # get arg max
                maxcol1 = torch.argmax(predict1_t.features.detach(), 1)
                maxcol2 = torch.argmax(predict2_t.features.detach(), 1)
                # subtract source column
                flowout1_t = (maxcol1.type(torch.FloatTensor) -
                              coord_t[:, 1].type(torch.FloatTensor)).reshape(
                                  (ncoords, 1))
                flowout2_t = (maxcol2.type(torch.FloatTensor) -
                              coord_t[:, 1].type(torch.FloatTensor)).reshape(
                                  (ncoords, 1))
            else:
                flowout1_t = predict1_t.features
                flowout2_t = predict2_t.features

            # back to numpy array
            tresult = time.time()

            meta_v = std.vector("larcv::ImageMeta")()
            yplane_meta = crop_v.at(iset * 3 + 2).meta()
            meta_v.push_back(yplane_meta)
            meta_v.push_back(yplane_meta)

            result_np = np.zeros((ncoords, 4), dtype=np.float32)
            result_np[:, 0:2] = sparse_np[:, 0:2]
            result_np[:, 2] = flowout1_t.detach().cpu().numpy()[:, 0]
            result_np[:, 3] = flowout2_t.detach().cpu().numpy()[:, 0]

            # store raw result
            sparse_raw = larcv.sparseimg_from_ndarray(result_np, meta_v,
                                                      larcv.msg.kDEBUG)
            ev_outdualflow_v.Append(sparse_raw)

            # prepare for stitcher
            if run_stitcher:
                result_np[:, 2][sparse_np[:, 4] < 10.0] = -1000
                result_np[:, 3][sparse_np[:, 4] < 10.0] = -1000
                sparse_result = larcv.sparseimg_from_ndarray(
                    result_np, meta_v, larcv.msg.kDEBUG)
                stitcher.addSparseData(sparse_result,
                                       crop_v.at(iset * 3 + 0).meta(),
                                       crop_v.at(iset * 3 + 1).meta())

            dt_result += time.time() - tresult
            nimgs += 1

        # make flow hits
        # --------------
        tflow = time.time()
        if run_reco_flowhits:
            print("Make Reco Flow Hits")
            larflowhits_v = larflow.makeFlowHitsFromSparseCrops(
                adc_v, ev_outdualflow_v.SparseImageArray(), threshold,
                "ubcroptrueflow.cfg", larcv.msg.kINFO)

        if has_mc and run_truth_flowhits:
            print("Make Truth Flow Hits")
            ev_chstatus = io.get_data(larcv.kProductChStatus,
                                      chstatus_producer)
            ev_trueflow = io.get_data(larcv.kProductImage2D, "larflow")
            trueflowhits_v = larflow.makeTrueFlowHitsFromWholeImage(
                adc_v, ev_chstatus, ev_trueflow.Image2DArray(), threshold,
                "ubcroptrueflow.cfg", larcv.msg.kINFO)

        dt_flow += time.time() - tflow

        # store
        # --------
        # full image
        tresult = time.time()

        if save_full_adc:
            out_wire = out.get_data(larcv.kProductImage2D, "wire")
            for p in xrange(3):
                out_wire.Append(adc_v.at(p))

        # cropped image
        if save_cropped_adc:
            out_crop = out.get_data(larcv.kProductImage2D, "cropadc")
            for iimg in xrange(crop_v.size()):
                out_crop.Append(crop_v.at(iimg))
            print("saved ", crop_v.size(), " adc crops")

        if save_cropped_trueflow:
            ev_trueflow_crops = io.get_data(larcv.kProductImage2D,
                                            "croppedflow")
            out_trueflow_crops = out.get_data(larcv.kProductImage2D,
                                              "croptrueflow")
            for iimg in xrange(ev_trueflow_crops.Image2DArray().size()):
                out_trueflow_crops.Append(
                    ev_trueflow_crops.Image2DArray().at(iimg))
            print("saved ",
                  out_trueflow_crops.Image2DArray().size(), " true flow crops")

        # save stitched output
        if run_stitcher:
            out_y2u = out.get_data(larcv.kProductImage2D, "larflowy2u")
            out_y2u.Append(stitcher._outimg_v.at(0))
            out_y2v = out.get_data(larcv.kProductImage2D, "larflowy2v")
            out_y2v.Append(stitcher._outimg_v.at(1))

        # save larflow hits
        if run_reco_flowhits:
            ev_larflowhits = out_ll.get_data(larlite.data.kLArFlow3DHit,
                                             "flowhits")
            for ihit in xrange(larflowhits_v.size()):
                ev_larflowhits.push_back(larflowhits_v.at(ihit))
        if has_mc and run_truth_flowhits:
            ev_trueflowhits = out_ll.get_data(larlite.data.kLArFlow3DHit,
                                              "trueflowhits")
            for ihit in xrange(trueflowhits_v.size()):
                ev_trueflowhits.push_back(trueflowhits_v.at(ihit))

        # set id
        out.set_id(run, subrun, event)
        out_ll.set_id(run, subrun, event)

        # save entry
        out.save_entry()
        out_ll.next_event()

        dt_result += time.time() - tresult

        # clear processor iomanager of  the entry
        io.clear_entry()
        nevents += 1

    dt_tot = time.time() - ttot

    print("Total run time: %.3f secs" % (dt_tot))
    print("  Data loading time: %.3f secs (%.3f secs/event)" %
          (dt_data, dt_data / nevents))
    print("  Prepare data for net: %.3f secs (%.3f secs/image)" %
          (dt_aten, dt_aten / nevents))
    print("  Net running time: %.3f secs (%.3f secs/event, %.3f secs/image)" %
          (dt_net, dt_net / nevents, dt_net / nimgs))
    print("  FlowHits running time: %.3f secs (%.3f secs/image)" %
          (dt_flow, dt_flow / nevents))
    print("  Result conversion: %.3f secs (%.3f secs/image)" %
          (dt_result, dt_result / nevents))

    out.finalize()
    out_ll.close()
    splitter.finalize()

    return None
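A possible invocation, with placeholder paths and weight-file name; every argument not listed keeps its default:

# hypothetical paths and weight file
deploy_sparselarflow_on_files("sparseflow_out_larcv.root",
                              "sparseflow_out_larlite.root",
                              ["supera_input.root"],
                              "dualflow_classvec_v2_weights.tar",
                              devicename="cpu",
                              maxentries=5)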
Code example #22
File: MCNN_uboone.py Project: ruian1/Mask_RCNN
 def __init__(self,input_file):
     self.iom=larcv.IOManager(0) 
     self.iom.add_in_file(input_file)
     self.iom.initialize()
     self.counter=1
     super(ShapesDataset, self).__init__()
Code example #23
str_input += "]"
print str_input

# ROOT data
from ROOT import std
str_parname = std.string("IOMan2")
#iocfg = larcv.PSet(str_parname,str_iomancfg)
iocfg = larcv.PSet("IOMan2")
iocfg.add_value("Name", "IOMan2")
iocfg.add_value("IOMode", "0")
iocfg.add_value("Verbosity", "2")
iocfg.add_value("InputFiles", str_input)
iocfg.add_value("ReadOnlyType", "[0,0,1]")
iocfg.add_value("ReadOnlyName", "[tpc,pmt,tpc]")

ioman = larcv.IOManager(iocfg)
ioman.initialize()

print "Network Ready: Batch Size=", batch_size
print "[ENTER] to continue."
raw_input()

# setup output

out = rt.TFile("out_%s_netanalysis.root" % (out_tag), "RECREATE")
entry = array('i', [0])
label = array('i', [0])
nuprob = array('f', [0.0])
winpe = array('f', [0.0])

tree = rt.TTree("net", "net output")
Code example #24
File: UBMRCNNClient.py Project: LArbys/ublarcvserver
    def __init__(self,
                 broker_address,
                 larcv_supera_file,
                 output_larcv_filename,
                 adc_producer="wire",
                 skip_detsplit=True,
                 opflash_producer="simpleFlashBeam",
                 tick_backwards=False,
                 mrcnn_tree_name="mrcnn",
                 use_compression=False,
                 use_sparseimg=True,
                 intimewin_min_tick=190,
                 intimewin_max_tick=320,
                 **kwargs):
        """
        broker_address        [str]   IP address and port of the broker, e.g. tcp://my.server.somewhere:6000
        larcv_supera_file     [str]   path to the LArCV root file with whole images
        output_larcv_filename [str]   path to the LArCV root file where we will write output
        adc_producer          [str]   name of the tree containing the input images, e.g. 'wire'
        skip_detsplit         [bool]  if true, process the whole image at once; if false, process crops
        opflash_producer      [str]   name of the tree carrying opflash information (used to make CROI from flash) (deprecated)
        tick_backwards        [bool]  if true, expect input LArCV images to be stored in tick-backward format
        mrcnn_tree_name       [str]   name of the output tree containing the Mask R-CNN output
        use_compression       [bool]  if false (default), do not compress the byte strings sent and received
        use_sparseimg         [bool]  if true (default), convert the whole image into a sparse image
                                      to reduce the number of bytes transferred
        intimewin_min_tick    [int]   start of the in-time window (ticks) for the trigger
        intimewin_max_tick    [int]   end of the in-time window (ticks) for the trigger
        """
        super(UBMRCNNClient, self).__init__(broker_address, **kwargs)

        # setup the input and output larcv iomanager, input larlite manager
        tick_direction = larcv.IOManager.kTickForward
        if tick_backwards:
            tick_direction = larcv.IOManager.kTickBackward
        self._inlarcv = larcv.IOManager(larcv.IOManager.kREAD, "",
                                        tick_direction)
        self._inlarcv.add_in_file(larcv_supera_file)
        self._inlarcv.initialize()
        self.skip_detsplit = skip_detsplit
        self._use_compression = use_compression

        LArliteManager = ublarcvapp.LArliteManager
        self._inlarlite = None
        # if larlite_opreco_file is not None:
        #     self._inlarlite = LArliteManager(larlite.storage_manager.kREAD)
        #     self._inlarlite.add_in_filename(larlite_opreco_file)
        #     self._inlarlite.open()
        #     #self._inlarlite.set_verbosity(0)

        self._outlarcv = larcv.IOManager(larcv.IOManager.kWRITE)
        self._outlarcv.set_out_file(output_larcv_filename)
        self._outlarcv.set_verbosity(larcv.msg.kDEBUG)
        self._outlarcv.initialize()
        self._log = logging.getLogger(__name__)

        FixedCROIFromFlash = ublarcvapp.ubdllee.FixedCROIFromFlashAlgo
        self._mrcnn_tree_name = mrcnn_tree_name
        self._adc_producer = adc_producer
        self._opflash_producer = opflash_producer

        self._ubsplitdet = None

        # MESSAGES TO LOOKFOR
        self._ERROR_NOMSGS = "ERROR:nomessages".encode('utf-8')
Code example #25
import os
import errno
import numpy as np
import ROOT
import torch
import cv2 as cv
from larcv import larcv

# Define an IO manager
_files0 = ["/media/hdd1/larbys/ssnet_dllee_trainingdata/train00.root"]
_files1 = ["/media/hdd1/larbys/ssnet_dllee_trainingdata/train01.root"]
_files2 = ["/media/hdd1/larbys/ssnet_dllee_trainingdata/train02.root"]

iomanager0 = larcv.IOManager(larcv.IOManager.kREAD, "",
                             larcv.IOManager.kTickBackward)
iomanager0.add_in_file(_files0[0])
iomanager0.add_in_file(_files1[0])
iomanager0.add_in_file(_files2[0])
iomanager0.initialize()

# Make sure the number of entries is divisible by 4
nEntries = iomanager0.get_n_entries()
if ((nEntries % 4) != 0) and (nEntries > 0):
    while (nEntries % 4) != 0:
        nEntries -= 1

print('nEntries:', nEntries)

inc = 4
index = 0
batch_idx = 0
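The snippet ends before the event loop. A hedged continuation, assuming the larcv1-style get_data(larcv.kProductImage2D, ...) interface used elsewhere on this page and a hypothetical "wire" producer name, that walks the entries in blocks of inc:

# hedged sketch of a batch loop over the entries set up above
while index < nEntries:
    batch_imgs = []
    for entry in range(index, index + inc):
        iomanager0.read_entry(entry)
        ev_img = iomanager0.get_data(larcv.kProductImage2D, "wire")  # hypothetical producer
        batch_imgs.append([larcv.as_ndarray(img) for img in ev_img.Image2DArray()])
    index += inc
    batch_idx += 1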
Code example #26
File: convert_test.py Project: rsharankova/larcv2
import ROOT,sys
from larlite import larlite as fmwk1
from larcv import larcv as fmwk2
from ROOT import handshake

io1=fmwk1.storage_manager(fmwk1.storage_manager.kBOTH)
io1.add_in_filename(sys.argv[1])
io1.set_out_filename('boke.root')
io1.open()

io2=fmwk2.IOManager(fmwk2.IOManager.kREAD)
io2.add_in_file(sys.argv[2])
io2.initialize()

hs=handshake.HandShaker()

ctr=0
while io1.next_event() and io2.read_entry(ctr):

    ev_pfpart  = io1.get_data(fmwk1.data.kPFParticle, "dl")
    ev_vertex  = io1.get_data(fmwk1.data.kVertex,     "dl")
    ev_shower  = io1.get_data(fmwk1.data.kShower,     "dl")
    ev_track   = io1.get_data(fmwk1.data.kTrack,      "dl")
    ev_cluster = io1.get_data(fmwk1.data.kCluster,    "dl")
    ev_hit     = io1.get_data(fmwk1.data.kHit,        "dl")
    ev_ass     = io1.get_data(fmwk1.data.kAssociation,"dl")

    ev_hit_in  = io1.get_data(fmwk1.data.kHit, "gaushit")
    ev_pgraph  = io2.get_data(fmwk2.kProductPGraph,'test')
    ev_pixel2d = io2.get_data(fmwk2.kProductPixel2D,'test_ctor')
Code example #27
File: run_crtmatch.py Project: NuTufts/larflow
parser.add_argument('-e',
                    '--start-entry',
                    type=int,
                    default=0,
                    help="Starting entry")

args = parser.parse_args()

import ROOT as rt
from ROOT import std
from larlite import larlite
from larcv import larcv
from larflow import larflow

io = larlite.storage_manager(larlite.storage_manager.kREAD)
iolcv = larcv.IOManager(larcv.IOManager.kREAD, "larcv",
                        larcv.IOManager.kTickBackward)

# INPUTS
io.add_in_filename(args.input_dlmerged)
io.add_in_filename(args.input_cluster)
iolcv.add_in_file(args.input_dlmerged)

io.open()
iolcv.reverse_all_products()
iolcv.initialize()

if ".root" == args.output[-5:]:
    larlite_out = args.output.replace(".root", "_larlite.root")
    larcv_out = args.output.replace(".root", "_larcv.root")
else:
    larlite_out = args.output + "_larlite.root"
Code example #28
    MSG_LEVEL = larcv.msg.kDEBUG
if 'info' in sys.argv:
    MSG_LEVEL = larcv.msg.kINFO

OUT_FNAME = "croproi.root"
NUM_EVENT = 1

ERROR_FILE_EXIST = 1
ERROR_WRITE_INIT = 2

if os.path.isfile(OUT_FNAME):
    cmsg.error("Test output file (%s) already exists..." % OUT_FNAME)
    sys.exit(ERROR_FILE_EXIST)

from larcv import larcv
o = larcv.IOManager(larcv.IOManager.kWRITE)
o.reset()
o.set_verbosity(MSG_LEVEL)
o.set_out_file(OUT_FNAME)

p = larcv.CropROI()
cfg = larcv.CreatePSetFromFile(sys.argv[1], "CropROI")
p.configure(cfg)
p.initialize()

if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)
for idx in xrange(NUM_EVENT):

    img = larcv.Image2D(10, 10)
    for x in xrange(img.as_vector().size()):
Code example #29
    def __init__(self, larcv_input_file, ismc=True):

        # we setup a larcv IOManager for read mode
        self.io = larcv.IOManager(larcv.IOManager.kBOTH)
        self.io.add_in_file(larcv_input_file)
        self.io.set_out_file("baka.root")
        self.io.initialize()

        # we setup some image processor modules

        # split a whole image into 3D-consistent chunks
        # the module will return bounding box definitions
        # the event loop will do the slicing
        ubsplit_cfg = """
        InputProducer: \"wire\"
        OutputBBox2DProducer: \"detsplit\"
        CropInModule: false
        OutputCroppedProducer: \"detsplit\"
        BBoxPixelHeight: 512
        BBoxPixelWidth: 832
        CoveredZWidth: 310
        FillCroppedYImageCompletely: true
        DebugImage: false
        MaxImages: -1
        RandomizeCrops: false
        MaxRandomAttempts: 1000
        MinFracPixelsInCrop: 0.0
        """
        fcfg = open("ubsplit.cfg", 'w')
        print >> fcfg, ubsplit_cfg
        fcfg.close()
        split_pset = larcv.CreatePSetFromFile("ubsplit.cfg", "UBSplitDetector")
        self.split_algo = larcv.UBSplitDetector()
        self.split_algo.configure(split_pset)
        self.split_algo.initialize()
        self.split_algo.set_verbosity(1)

        # cropper for larflow (needed if we do not restitch the output)
        lfcrop_cfg = """Verbosity:0
        InputBBoxProducer: \"detsplit\"
        InputCroppedADCProducer: \"detsplit\"
        InputADCProducer: \"wire\"
        InputVisiProducer: \"pixvisi\"
        InputFlowProducer: \"pixflow\"
        OutputCroppedADCProducer:  \"adc\"
        OutputCroppedVisiProducer: \"visi\"
        OutputCroppedFlowProducer: \"flow\"
        OutputCroppedMetaProducer: \"flowmeta\"
        OutputFilename: \"baka_lf.root\"
        SaveOutput: false
        CheckFlow:  false
        MakeCheckImage: false
        DoMaxPool: false
        RowDownsampleFactor: 2
        ColDownsampleFactor: 2
        MaxImages: -1
        LimitOverlap: false
        RequireMinGoodPixels: false
        MaxOverlapFraction: 0.2
        UseVectorizedCode: true
        IsMC: {}
        """
        flowcrop_cfg = open("ublarflowcrop.cfg", 'w')
        print >> flowcrop_cfg, lfcrop_cfg.format(str(ismc).lower())
        flowcrop_cfg.close()
        flowcrop_pset = larcv.CreatePSetFromFile("ublarflowcrop.cfg",
                                                 "UBLArFlowCrop")
        self.flowcrop_algo = None
        # not yet implemented in larcv1
        #self.flowcrop_algo = larcv.UBCropLArFlow()
        #self.flowcrop_algo.configure( flowcrop_pset )
        #self.flowcrop_algo.initialize()
        #self.flowcrop_algo.set_verbosity(0)
        self.ismc = ismc

        self._nentries = self.io.get_n_entries()
Code example #30
                       gpuid=gpuid,
                       checkpointgpu=checkpoint_gpuid,
                       use_half=use_half)
    model.to(device=torch.device("cuda:%d" % (gpuid)))
    model.eval()

    # set planes
    source_plane = 2
    if FLOWDIR == "y2u":
        target_plane = 0
    elif FLOWDIR == "y2v":
        target_plane = 1

    # output IOManager
    if stitch:
        outputdata = larcv.IOManager(larcv.IOManager.kBOTH)
        outputdata.add_in_file(input_larcv_filename)
    else:
        # if not stitching, we will save crops of adc, flow, and visi
        outputdata = larcv.IOManager(larcv.IOManager.kWRITE)
    outputdata.set_out_file(output_larcv_filename)
    outputdata.initialize()

    # LArFlow subimage stitcher
    if stitch:
        stitcher = larcv.UBLArFlowStitcher("flow")
    else:
        stitcher = None

    timing = OrderedDict()
    timing["total"] = 0.0