Example #1
    def __init__(self,
                 larcv_input_file,
                 adc_producer="wire",
                 chstatus_producer="wire",
                 tick_backward=True,
                 ismc=True,
                 workdir="./"):

        # we set up a larcv IOManager in read/write mode
        tick_direction = larcv.IOManager.kTickForward
        if tick_backward:
            tick_direction = larcv.IOManager.kTickBackward
        self.io = larcv.IOManager(larcv.IOManager.kBOTH, "input",
                                  tick_direction)
        self.io.add_in_file(larcv_input_file)
        self.io.set_out_file("temp_deploy_splitter_file.root")
        self.io.initialize()

        # we set up some image processor modules

        # split a whole image into 3D-consistent chunks
        # the module will return bounding box definitions
        # the event loop will do the slicing
        ubsplit_cfg = """
        InputProducer: \"%s\"
        OutputBBox2DProducer: \"detsplit\"
        CropInModule: true
        OutputCroppedProducer: \"detsplit\"
        BBoxPixelHeight: 512
        BBoxPixelWidth: 832
        CoveredZWidth: 310
        FillCroppedYImageCompletely: true
        DebugImage: false
        MaxImages: -1
        RandomizeCrops: false
        MaxRandomAttempts: 1000
        MinFracPixelsInCrop: 0.0
        """ % (adc_producer)
        fcfg = open(workdir + "/ubsplit.cfg", 'w')
        print(ubsplit_cfg, file=fcfg)
        fcfg.close()
        split_pset = larcv.CreatePSetFromFile(workdir + "/ubsplit.cfg",
                                              "UBSplitDetector")
        self.split_algo = ublarcvapp.UBSplitDetector()
        self.split_algo.configure(split_pset)
        self.split_algo.initialize()
        self.split_algo.set_verbosity(0)

        # cropper for larflow (needed if we do not restitch the output)
        lfcrop_cfg = """Verbosity:0
        InputBBoxProducer: \"detsplit\"
        InputCroppedADCProducer: \"detsplit\"
        InputADCProducer: \"{}\"
        InputChStatusProducer: \"{}\"
        InputVisiProducer: \"pixvisi\"
        InputFlowProducer: \"pixflow\"
        OutputCroppedADCProducer:  \"adc\"
        OutputCroppedVisiProducer: \"visi\"
        OutputCroppedFlowProducer: \"flow\"
        OutputCroppedMetaProducer: \"flowmeta\"
        OutputFilename: \"baka_lf.root\"
        SaveOutput: false
        CheckFlow:  false
        MakeCheckImage: false
        DoMaxPool: false
        RowDownsampleFactor: 2
        ColDownsampleFactor: 2
        MaxImages: -1
        LimitOverlap: false
        RequireMinGoodPixels: false
        MaxOverlapFraction: 0.2
        UseVectorizedCode: true
        HasVisibilityImage: false
        SaveTrainingOutput: false
        IsMC: {}
        """
        flowcrop_cfg = open(workdir + "/ublarflowcrop.cfg", 'w')
        print(lfcrop_cfg.format(adc_producer,
                                chstatus_producer,
                                str(ismc).lower()),
              file=flowcrop_cfg)
        flowcrop_cfg.close()
        flowcrop_pset = larcv.CreatePSetFromFile(
            workdir + "/ublarflowcrop.cfg", "UBLArFlowCrop")
        self.flowcrop_algo = ublarcvapp.UBCropLArFlow()
        self.flowcrop_algo.configure(flowcrop_pset)
        self.flowcrop_algo.initialize()
        self.flowcrop_algo.set_verbosity(0)
        self.ismc = ismc

        self._nentries = self.io.get_n_entries()
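
The comments above note that the splitter only produces bounding-box definitions and cropped images, and that an event loop is expected to drive it. A minimal sketch of such a loop, assuming `splitter` is an instance of the class whose constructor is shown above (the standalone function and its name are illustrative; the `process(wholeview_v, img2d_v, bbox_v)` call follows the later examples):

def run_splitter(splitter, adc_producer="wire"):
    # Sketch only: apply the configured UBSplitDetector to every entry.
    for ientry in range(splitter._nentries):
        splitter.io.read_entry(ientry)
        ev_adc = splitter.io.get_data(larcv.kProductImage2D, adc_producer)
        wholeview_v = ev_adc.Image2DArray()

        # containers the splitter fills: cropped images and their bounding boxes
        img2d_v = larcv.EventImage2D().Image2DArray()
        bbox_v = larcv.EventROI().ROIArray()
        splitter.split_algo.process(wholeview_v, img2d_v, bbox_v)
        print("entry %d: produced %d crops" % (ientry, img2d_v.size()))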
Example #2
    def process_entry(self, entry_num):
        """ perform all actions -- send, receive, process, store -- for entry"""

        # get the entries
        ok = self._inlarcv.read_entry(entry_num)
        if not ok:
            raise RuntimeError("could not read larcv entry %d" % (entry_num))

        # get data
        ev_wholeview = self._inlarcv.get_data(larcv.kProductImage2D,
                                              self._adc_producer)
        ev_chstatus = self._inlarcv.get_data(larcv.kProductChStatus,
                                             self._chstatus_producer)

        wholeview_v = ev_wholeview.Image2DArray()
        print("Wholeview meta: ", wholeview_v[0].meta().dump())
        # ev_wholeview_copy = larcv.EventImage2D()
        labels = larcv.EventImage2D()
        labels.Append(larcv.Image2D(wholeview_v[0].meta()))
        labels.Append(larcv.Image2D(wholeview_v[1].meta()))
        labels.Append(larcv.Image2D(wholeview_v[2].meta()))
        labels_v = labels.Image2DArray()
        # labels_v = [larcv.Image2D(wholeview_v[0].meta()),larcv.Image2D(wholeview_v[1].meta()),larcv.Image2D(wholeview_v[2].meta())]
        nplanes = wholeview_v.size()
        run = self._inlarcv.event_id().run()
        subrun = self._inlarcv.event_id().subrun()
        event = self._inlarcv.event_id().event()
        print("num of planes in entry {}: ".format((run, subrun, event)),
              nplanes)

        # crop using UBSplit for infill network
        # we want to break the image into a set of crops to send in

        # create labels_image_v
        # for img in labels_v:
        #     img.paint(0)

        labels_v = ublarcvapp.InfillDataCropper().ChStatusToLabels(
            labels_v, ev_chstatus)

        # we split the entire image using UBSplitDetector
        scfg = """Verbosity: 3
        InputProducer: \"wire\"
        OutputBBox2DProducer: \"detsplit\"
        CropInModule: true
        OutputCroppedProducer: \"detsplit\"
        BBoxPixelHeight: 512
        BBoxPixelWidth: 496
        CoveredZWidth: 310
        FillCroppedYImageCompletely: true
        DebugImage: false
        MaxImages: -1
        RandomizeCrops: 0
        MaxRandomAttempts: 4
        MinFracPixelsInCrop: -0.0001
        TickForward: true
        """

        fcfg = open("ubsplit.cfg", 'w')
        print(scfg, end="", file=fcfg)
        fcfg.close()

        cfg = larcv.CreatePSetFromFile("ubsplit.cfg", "UBSplitDetector")
        algo = ublarcvapp.UBSplitDetector()
        algo.initialize()
        algo.configure(cfg)
        algo.set_verbosity(2)

        bbox_list = larcv.EventROI()
        img2du = []
        img2dv = []
        img2dy = []
        img2d_list = []
        bbox_v = larcv.EventROI().ROIArray()
        img2d_v = larcv.EventImage2D().Image2DArray()
        algo.process(wholeview_v, img2d_v, bbox_v)

        bbox_labels_list = larcv.EventROI()
        img2du_labels = []
        img2dv_labels = []
        img2dy_labels = []
        img2d_labels_list = []
        bbox_labels_v = larcv.EventROI().ROIArray()
        img2d_labels_v = larcv.EventImage2D().Image2DArray()
        algo.process(labels_v, img2d_labels_v, bbox_labels_v)

        algo.finalize()

        # separate by planes
        for i in img2d_v:
            p = i.meta().plane()
            if p == 0:
                img2du.append(i)
            elif p == 1:
                img2dv.append(i)
            elif p == 2:
                img2dy.append(i)

        for i in img2d_labels_v:
            p = i.meta().plane()
            if p == 0:
                img2du_labels.append(i)
            elif p == 1:
                img2dv_labels.append(i)
            elif p == 2:
                img2dy_labels.append(i)

        img2d_list.append(img2du)
        img2d_list.append(img2dv)
        img2d_list.append(img2dy)
        img2d_labels_list.append(img2du_labels)
        img2d_labels_list.append(img2dv_labels)
        img2d_labels_list.append(img2dy_labels)

        for plane in img2d_list:
            print("In list", len(plane))
        for plane in img2d_labels_list:
            print("In labels list", len(plane))

        # sparsify image 2d
        thresholds = std.vector("float")(1, 10.0)
        sparseimg_list = []
        usparse_v = []
        vsparse_v = []
        ysparse_v = []

        for a, b in zip(img2d_list, img2d_labels_list):
            for img, label in zip(a, b):
                p = img.meta().plane()
                sparse_img = larcv.SparseImage(img, label, thresholds)
                if (p == 0):
                    usparse_v.append(sparse_img)
                elif (p == 1):
                    vsparse_v.append(sparse_img)
                elif (p == 2):
                    ysparse_v.append(sparse_img)

        sparseimg_list.append(usparse_v)
        sparseimg_list.append(vsparse_v)
        sparseimg_list.append(ysparse_v)

        for plane in sparseimg_list:
            print("In sparse list", len(plane))

        # send messages
        # (send crops to worker to go through network)
        replies = self.send_image_list(sparseimg_list,
                                       run=run,
                                       subrun=subrun,
                                       event=event)
        print("FINISHED SEND STEP")
        self.process_received_images(wholeview_v, ev_chstatus, replies,
                                     img2d_list)
        print("FINISHED PROCESS STEP")

        self._outlarcv.set_id(self._inlarcv.event_id().run(),
                              self._inlarcv.event_id().subrun(),
                              self._inlarcv.event_id().event())

        self._outlarcv.save_entry()
        print("SAVED ENTRY")
        return True
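
process_entry handles a single event; the surrounding driver loop is not part of the example. A minimal sketch, assuming `client` is an instance of the class that owns process_entry and exposes the input and output IOManagers as `_inlarcv` and `_outlarcv` (as in the code above):

# Sketch only: run process_entry over every entry in the input file.
nentries = client._inlarcv.get_n_entries()
for ientry in range(nentries):
    if not client.process_entry(ientry):
        print("entry %d failed" % (ientry))

# close out the output file (finalize() is assumed to flush the larcv IOManager)
client._outlarcv.finalize()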
Example #3
    def __init__(self, broker_address,
                 larcv_supera_file,
                 output_larcv_filename,
                 adc_producer="wire",
                 output_producer="larflow",
                 tick_backwards=False,
                 save_cropped_adc=False,
                 flow_dirs=["y2u","y2v"],
                 plane_scale_factors=[1.0,1.0,1.0],
                 receive_timeout_secs=300,
                 **kwargs):
        """
        this class loads either larcv::sparseimage or larcv::image2d data from
        the input file, prepares the data into a binary json (bson) message to
        be sent to the broker. when the broker replies with worker output,
        save it to larcv root file.

        inputs
        ------
        broker_address str address of broker Socket
        larcv_supera_file str path to input data
        output_larcv_file str path to output file

        kwargs
        -------
        adc_producer str (default:"wire") name of ADC image2d tree
        output_producer str (default:"larflow") name of output flow info. will append flow_dir to name.
        tick_backwards bool (default:False) set to True if reading in LArCV1 files
        save_cropped_adc bool (default:False) save the ADC crops
        flow_dirs [list of str] direction of flow. options are ["y2u","y2v"]
        """
        super(UBDenseLArFlowClient,self).__init__(broker_address,**kwargs)

        # setup the input iomanager
        tick_direction = larcv.IOManager.kTickForward
        if tick_backwards:
            tick_direction = larcv.IOManager.kTickBackward
        self._inlarcv = larcv.IOManager(larcv.IOManager.kREAD,"",
                                        tick_direction)
        self._inlarcv.add_in_file(larcv_supera_file)
        self._inlarcv.initialize()

        # setup output iomanager
        self._outlarcv = larcv.IOManager(larcv.IOManager.kWRITE)
        self._outlarcv.set_out_file(output_larcv_filename)
        self._outlarcv.initialize()

        # setup config
        self._adc_producer    = adc_producer
        self._output_producer = output_producer

        # thresholds: adc values must be above this value to be included
        self._threshold_v   = std.vector("float")(3,10.0)

        # global scale factors to apply to ADC values for each plane
        self._plane_scale_factors = plane_scale_factors

        # setup logger
        self._log = logging.getLogger(__name__)

        # setup ubdetsplit
        fcfg = open("tmp_ubsplit.cfg",'w')
        print(self._get_default_ubsplit_cfg(),file=fcfg)
        fcfg.close()
        split_pset = larcv.CreatePSetFromFile( "tmp_ubsplit.cfg","UBSplitDetector")
        self._split_algo = ublarcvapp.UBSplitDetector()
        self._split_algo.configure(split_pset)
        self._split_algo.initialize()
        self._split_algo.set_verbosity(2)

        self.flow_dirs = flow_dirs
        self._timeout_secs = receive_timeout_secs
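
A minimal instantiation sketch for the constructor above, with placeholder paths and the documented keyword defaults spelled out (the broker address format is an assumption):

# Sketch only: construct the client with placeholder inputs.
client = UBDenseLArFlowClient(
    "tcp://localhost:5555",      # broker_address (placeholder)
    "supera_input.root",         # larcv_supera_file (placeholder)
    "larflow_output.root",       # output_larcv_filename (placeholder)
    adc_producer="wire",
    output_producer="larflow",
    tick_backwards=False,
    save_cropped_adc=False,
    flow_dirs=["y2u", "y2v"],
    receive_timeout_secs=300)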
Example #4
    # note: the original snippet starts mid-call; the opening of the IOManager
    # constructor is reconstructed here assuming read-only mode
    io = larcv.IOManager(larcv.IOManager.kREAD, "",
                         larcv.IOManager.kTickBackward)
    io.add_in_file(supera_file)
    io.initialize()

    weights = [
        "sparseinfill_uplane_test.tar", "sparseinfill_vplane_test.tar",
        "sparseinfill_yplane_test.tar"
    ]

    # splitter
    cfg = "../infill_split.cfg"

    pset = larcv.CreatePSetFromFile(cfg, "UBSplitDetector")
    print(pset.dump())

    ubsplit = ublarcvapp.UBSplitDetector()
    ubsplit.configure(pset)
    ubsplit.initialize()

    ubbadch = ublarcvapp.EmptyChannelAlgo()

    nentries = io.get_n_entries()

    for ientry in range(nentries):
        io.read_entry(ientry)

        # Event Image
        ev_img = io.get_data(larcv.kProductImage2D, "wire")
        img_v = ev_img.Image2DArray()
        img_np_v = [larcv.as_ndarray(img_v.at(p)) for p in range(3)]
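
        # --- sketch only (not in the original snippet): the loop body could
        # continue by applying the configured splitter to the whole-view
        # images, using the process(wholeview_v, img2d_v, bbox_v) call shown
        # in the other examples, and converting each crop to numpy.
        img2d_v = larcv.EventImage2D().Image2DArray()
        bbox_v = larcv.EventROI().ROIArray()
        ubsplit.process(img_v, img2d_v, bbox_v)
        crop_np_v = [
            larcv.as_ndarray(img2d_v.at(i)) for i in range(img2d_v.size())
        ]
        print("entry %d: %d crops for the infill network" %
              (ientry, img2d_v.size()))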
Example #5
    def process_entry(self, entry_num, tick_backwards):
        """ perform all actions -- send, receive, process, store -- for entry"""

        # get the entries
        ok = self._inlarcv.read_entry(entry_num)
        if not ok:
            raise RuntimeError("could not read larcv entry %d" % (entry_num))

        # get data
        ev_wholeview = self._inlarcv.get_data(larcv.kProductImage2D,
                                              self._adc_producer)
        ev_chstatus = self._inlarcv.get_data(larcv.kProductChStatus,
                                             self._chstatus_producer)
        wholeview_v = ev_wholeview.Image2DArray()
        print("Wholeview meta: ", wholeview_v[0].meta().dump())
        labels_v = ev_wholeview.Image2DArray()
        nplanes = wholeview_v.size()
        run = self._inlarcv.event_id().run()
        subrun = self._inlarcv.event_id().subrun()
        event = self._inlarcv.event_id().event()
        print("num of planes in entry {}: ".format((run, subrun, event)),
              nplanes)

        # crop using UBSplit for infill network
        # we want to break the image into a set of crops to send in

        # define the bbox_v images and cropped images
        bbox_list = larcv.EventROI()
        img2d_list = larcv.EventImage2D()

        bbox_v = larcv.EventROI().ROIArray()
        img2d_v = larcv.EventImage2D().Image2DArray()

        # we split the entire image using UBSplitDetector
        scfg = """Verbosity: 3
        InputProducer: \"wire\"
        OutputBBox2DProducer: \"detsplit\"
        CropInModule: true
        OutputCroppedProducer: \"detsplit\"
        BBoxPixelHeight: 512
        BBoxPixelWidth: 832
        CoveredZWidth: 310
        FillCroppedYImageCompletely: true
        DebugImage: false
        MaxImages: -1
        RandomizeCrops: false
        MaxRandomAttempts: 4
        MinFracPixelsInCrop: -0.0001
        TickForward: true
        """

        fcfg = open("ubsplit.cfg", 'w')
        print(scfg, end="", file=fcfg)
        fcfg.close()

        cfg = larcv.CreatePSetFromFile("ubsplit.cfg", "UBSplitDetector")
        algo = ublarcvapp.UBSplitDetector()
        algo.initialize()
        algo.configure(cfg)
        algo.set_verbosity(2)

        bbox_list = larcv.EventROI()
        img2du = []
        img2dv = []
        img2dy = []
        img2d_list = []

        bbox_v = larcv.EventROI().ROIArray()
        img2d_v = larcv.EventImage2D().Image2DArray()

        algo.process(wholeview_v, img2d_v, bbox_v)

        # separate by planes
        for i in img2d_v:
            p = i.meta().plane()
            if p == 0:
                if tick_backwards:
                    scalefactor = 43.0 / 53.0
                    ublarcvapp.InfillImageStitcher().PixelScaling(
                        i, scalefactor)
                img2du.append(i)
            elif p == 1:
                if tick_backwards:
                    scalefactor = (43.0 / 52.0)
                    ublarcvapp.InfillImageStitcher().PixelScaling(
                        i, scalefactor)
                img2dv.append(i)
            elif p == 2:
                if tick_backwards:
                    scalefactor = (48.0 / 59.0)
                    ublarcvapp.InfillImageStitcher().PixelScaling(
                        i, scalefactor)
                img2dy.append(i)

        img2d_list.append(img2du)
        img2d_list.append(img2dv)
        img2d_list.append(img2dy)

        for plane in img2d_list:
            print("In list", len(plane))

        # send messages
        # (send crops to worker to go through network)
        replies = self.send_image_list(img2d_list,
                                       run=run,
                                       subrun=subrun,
                                       event=event)
        print("FINISHED SEND STEP")
        self.process_received_images(wholeview_v, ev_chstatus, replies,
                                     tick_backwards)
        print("FINISHED PROCESS STEP")

        self._outlarcv.set_id(self._inlarcv.event_id().run(),
                              self._inlarcv.event_id().subrun(),
                              self._inlarcv.event_id().event())

        self._outlarcv.save_entry()
        print("SAVED ENTRY")
        return True