Example #1
def decode_larcv1_metamsg(metamsg, override_plane=None):
    """
    decode meta messages, which look like: 'Plane 65535 (rows,cols) = (0,0) ... Left Top (0,0) ... Right Bottom (0,0)\n'

    constructor:
    ImageMeta(const double width=0.,     const double height=0.,
      const size_t row_count=0., const size_t col_count=0,
      const double origin_x=0.,  const double origin_y=0.,
      const PlaneID_t plane=::larcv::kINVALID_PLANE)
    """
    print "decoding meta message: ", metamsg.decode("ascii")
    # integers appear in the message in this order:
    # [plane, rows, cols, left_top_x, left_top_y, right_bottom_x, right_bottom_y]
    meta_nums = [int(x) for x in re.findall(r"\d+", metamsg.decode("ascii"))]
    width = meta_nums[5] - meta_nums[3]   # right_bottom_x - left_top_x
    height = meta_nums[4] - meta_nums[6]  # left_top_y - right_bottom_y
    rows = meta_nums[1]
    cols = meta_nums[2]
    plane = meta_nums[0]
    if override_plane is not None:
        plane = override_plane
    origin_x = meta_nums[3]  # left-top corner is used as the image origin
    origin_y = meta_nums[4]
    meta = larcv.ImageMeta(width, height, rows, cols, origin_x, origin_y,
                           plane)

    return meta
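
To make the index mapping concrete, here is a minimal standalone sketch of the parsing step only; the message string is a made-up example that follows the format quoted in the docstring, and larcv itself is not needed for this part.

import re

# hypothetical meta message in the format described in the docstring above
msg = b"Plane 2 (rows,cols) = (512,832) ... Left Top (0,8448) ... Right Bottom (832,3648)\n"

nums = [int(x) for x in re.findall(r"\d+", msg.decode("ascii"))]
# nums == [plane, rows, cols, left_top_x, left_top_y, right_bottom_x, right_bottom_y]
plane, rows, cols = nums[0], nums[1], nums[2]
width = nums[5] - nums[3]   # right_bottom_x - left_top_x  -> 832
height = nums[4] - nums[6]  # left_top_y - right_bottom_y  -> 4800
print("plane=%d rows=%d cols=%d width=%d height=%d" % (plane, rows, cols, width, height))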
Example #2
def Get_crop_image(plane, x_2d, y_2d, ev_img):
    meta_crop = larcv.ImageMeta(512, 512 * 6, 512, 512, 0, 8448, plane)
    meta_origin_x, meta_origin_y = meta_origin_helper(x_2d, y_2d, verbose=1)
    meta_crop.reset_origin(meta_origin_x, meta_origin_y)
    img_vtx = ev_img.at(plane).crop(meta_crop)
    img_vtx = larcv.as_ndarray(img_vtx)
    img_vtx = np.where(img_vtx < 10, 0, img_vtx)
    img_vtx = np.where(img_vtx > 500, 500, img_vtx)
    return img_vtx
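
The two np.where calls at the end act as a zero-suppression floor at 10 and a saturation cap at 500; a tiny numpy-only sketch of that transform on toy values (no larcv needed):

import numpy as np

img = np.array([[3.0, 15.0, 700.0],
                [9.9, 10.0, 499.0]])
img = np.where(img < 10, 0, img)     # values below 10 become 0
img = np.where(img > 500, 500, img)  # values above 500 are capped at 500
print(img)  # -> [[0, 15, 500], [0, 10, 499]] (as floats)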
Example #3
def decode_larcv2_evimage2d(io, producername, imgdata_np, imgmeta_np):

    # make evcontainer
    evout = io.get_data("image2d", producername)

    # make meta
    nimgs = imgdata_np.shape[1]
    for i in xrange(nimgs):
        nrows = Long(imgmeta_np[0, i, 0, 1])
        ncols = Long(imgmeta_np[0, i, 0, 0])
        planeid = Long(imgmeta_np[0, i, 0, 6])

        lcvmeta = larcv.ImageMeta(imgmeta_np[0, i, 0, 2], imgmeta_np[0, i, 0, 3],
                                  imgmeta_np[0, i, 0, 4], imgmeta_np[0, i, 0, 5],
                                  nrows, ncols, planeid)
        # convert image
        outarr = np.flip(imgdata_np[0, i, :, :].transpose((1, 0)), 0)
        lcvimg = larcv.as_image2d_meta(outarr, lcvmeta)
        evout.append(lcvimg)

    return
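
Before the conversion, each image block is transposed and its first axis reversed; a toy numpy sketch of just that reorientation (the larcv call itself is unchanged from the example above):

import numpy as np

a = np.arange(6).reshape(2, 3)       # toy stand-in for imgdata_np[0, i, :, :]
b = np.flip(a.transpose((1, 0)), 0)  # shape (3, 2), rows reversed
print(a)
print(b)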
Example #4
if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)
for idx in xrange(NUM_EVENT):

    img = larcv.Image2D(10, 10)
    for x in xrange(img.as_vector().size()):
        img.set_pixel(x, x % 3)

    event_image1 = o.get_data(larcv.kProductImage2D, "original")
    event_image1.Append(img)
    event_image2 = o.get_data(larcv.kProductImage2D, "target")
    event_image2.Append(img)

    roi = larcv.ROI()
    #ImageMeta(const double width=0.,     const double height=0.,
    #          const size_t row_count=0., const size_t col_count=0,
    #          const double origin_x=0.,  const double origin_y=0.,
    #          const PlaneID_t plane=::larcv::kINVALID_PLANE)
    meta = larcv.ImageMeta(3, 3, 3, 3, 2, -2, 0)
    roi.AppendBB(meta)
    event_roi = o.get_data(larcv.kProductROI, "roi")
    event_roi.Append(roi)
    o.set_id(0, 0, idx)

    p.process(o)
    o.save_entry()

    idx += 1
p.finalize()
o.finalize()
Example #5
    def initialize(self):
        self._last_entry = -1
        self._event_keys = []
        self._metas = []
        # configure the input
        from larcv import larcv
        from ROOT import TChain
        # set 2d vs. 3d functions
        as_numpy_voxels = None
        as_numpy_pcloud = None
        dtype_keyword = ''
        if self._flags.DATA_DIM == 3:
            as_numpy_voxels = larcv.fill_3d_voxels
            as_numpy_pcloud = larcv.fill_3d_pcloud
            dtype_keyword = 'sparse3d'
        elif self._flags.DATA_DIM == 2:
            as_numpy_voxels = larcv.fill_2d_voxels
            as_numpy_pcloud = larcv.fill_2d_pcloud
            dtype_keyword = 'sparse2d'
        else:
            print('larcv IO not implemented for data dimension',
                  self._flags.DATA_DIM)
            raise NotImplementedError

        ch_data = TChain('%s_%s_tree' % (dtype_keyword, self._flags.DATA_KEY))
        ch_label = None
        if self._flags.LABEL_KEY:
            ch_label = TChain('%s_%s_tree' %
                              (dtype_keyword, self._flags.LABEL_KEY))
        for f in self._flags.INPUT_FILE:
            ch_data.AddFile(f)
            if ch_label: ch_label.AddFile(f)

        self._voxel = []
        self._feature = []
        self._label = []
        br_data, br_label = (None, None)
        event_fraction = 1. / ch_data.GetEntries() * 100.
        total_point = 0.
        for i in range(ch_data.GetEntries()):
            if self._flags.LIMIT_NUM_SAMPLE > 0 and i > self._flags.LIMIT_NUM_SAMPLE:
                break
            ch_data.GetEntry(i)
            if ch_label: ch_label.GetEntry(i)
            if br_data is None:
                br_data = getattr(
                    ch_data,
                    '%s_%s_branch' % (dtype_keyword, self._flags.DATA_KEY))
                if ch_label:
                    br_label = getattr(
                        ch_label, '%s_%s_branch' %
                        (dtype_keyword, self._flags.LABEL_KEY))

            # HACK that should go away when unifying 2d and 3d data reps...
            data_tensor = br_data
            label_tensor = br_label
            if self._flags.DATA_DIM == 2:
                data_tensor = br_data.as_vector().front()
                label_tensor = br_label.as_vector().front()

            num_point = data_tensor.as_vector().size()
            if num_point < 10: continue

            self._event_keys.append(
                (br_data.run(), br_data.subrun(), br_data.event()))
            # HACK that should go away when unifying 2d and 3d data reps...
            if self._flags.DATA_DIM == 2:
                self._metas.append(larcv.ImageMeta(label_tensor.meta()))
            else:
                self._metas.append(larcv.Voxel3DMeta(br_data.meta()))

            np_voxel = np.zeros(shape=(num_point, self._flags.DATA_DIM),
                                dtype=np.int32)
            as_numpy_voxels(data_tensor, np_voxel)
            self._voxel.append(np_voxel)

            np_feature = np.zeros(shape=(num_point, 1), dtype=np.float32)
            as_numpy_pcloud(data_tensor, np_feature)
            self._feature.append(np_feature)

            if ch_label:
                np_label = np.zeros(shape=(num_point, 1), dtype=np.float32)
                as_numpy_pcloud(label_tensor, np_label)
                np_label = np_label.reshape([num_point])  # - 1.
                self._label.append(np_label)
            total_point += num_point
            sys.stdout.write(
                'Processed %d%% ... %d MB\r' %
                (int(event_fraction * i), int(total_point * 4 * 2 / 1.e6)))
            sys.stdout.flush()

        sys.stdout.write('\n')
        sys.stdout.flush()
        self._num_channels = self._voxel[-1].shape[-1]
        self._num_entries = len(self._voxel)
        # Output
        if self._flags.OUTPUT_FILE:
            import tempfile
            cfg = '''
IOManager: {
      Verbosity:   2
      Name:        "IOManager"
      IOMode:      1
      OutFileName: "%s"
      InputFiles:  []
      InputDirs:   []
      StoreOnlyType: []
      StoreOnlyName: []
    }
                  '''
            cfg = cfg % self._flags.OUTPUT_FILE
            cfg_file = tempfile.NamedTemporaryFile('w')
            cfg_file.write(cfg)
            cfg_file.flush()
            self._fout = larcv.IOManager(cfg_file.name)
            self._fout.initialize()
Example #6
        # get predictions from gpu
        ADCvalue_np = pred_ADCvalue.detach().cpu().numpy().astype(np.float32)
        ADC_t = ADC_t.detach().cpu()
        labelbasic_t = labelbasic_t.detach().cpu()
        weights_t = weights_t.detach().cpu()

        for ib in range(batch_size):
            if ientry >= nevts:
                # skip last portion of last batch
                break

            # get meta
            inputmeta.read_entry(ientry)
            ev_meta = inputmeta.get_data("image2d", "ADC")
            # ImageMeta(width, height, rows, cols, origin_x, origin_y, plane)
            outmeta = larcv.ImageMeta(496, 832, 496, 832, 0, 0, plane)

            beforeloop = time.time()

            #variables for accuracy check
            within2 = 0.0
            within5 = 0.0
            within10 = 0.0
            within20 = 0.0
            chargetotal = 0.0

            labelbasic_numpy = labelbasic_t.numpy()
            weights_numpy = weights_t.numpy()
            ADC_numpy = ADC_t.numpy()

            #save a copy of labels for use in creating diff and threshold images
Example #7
cfg3 = larcv.CreatePSetFromFile(sys.argv[4], "WireMask")
mask.configure(cfg3)

stream1.initialize()
stream2.initialize()
p.initialize()
mask.initialize()

if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)

for idx in xrange(NUM_EVENT):

    #we have to make the image from ImageMeta if we are going to
    #use channel status as it checks image2d.plane
    meta1 = larcv.ImageMeta(10, 10, 10, 10, 0, 10, 0)
    img1 = larcv.Image2D(meta1)
    for x in xrange(img1.as_vector().size()):
        if x % 2 == 0: img1.set_pixel(x, 10)
        else: img1.set_pixel(x, 0)

    meta2 = larcv.ImageMeta(10, 10, 10, 10, 0, 10, 0)
    img2 = larcv.Image2D(meta2)
    for x in xrange(img2.as_vector().size()):
        img2.set_pixel(x, 10)
        if (x / 10) % 2 == 0: img2.set_pixel(x, 0)

    # Input stream 1
    # combined image is going to steal (std::move) this from me
    event_image1_tpc = o.get_data(larcv.kProductImage2D, "stream1_tpc")
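
For reference, the two set_pixel loops above fill the 10x10 images with complementary stripe patterns over the flat pixel index; a numpy sketch of the same two patterns (how the flat index maps onto rows and columns is left to larcv's Image2D and not assumed here):

import numpy as np

flat = np.arange(100)                  # flat pixel index of a 10x10 image

img1 = np.where(flat % 2 == 0, 10, 0)  # every other pixel set to 10
img2 = np.full(100, 10)
img2[(flat // 10) % 2 == 0] = 0        # every other block of 10 pixels zeroed

print(img1.reshape(10, 10))
print(img2.reshape(10, 10))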
Example #8
track_img = larcv.Image2D(wholeview_v.at(1).meta())
bg_img = larcv.Image2D(wholeview_v.at(2).meta())
shower_img.paint(0)
track_img.paint(0)
bg_img.paint(0)

for out, img2d in zip(out_v, crop_img2d_v):

    # threshold scores for better compression
    showerslice = out[:, 1, :, :].reshape(512, 512)
    trackslice = out[:, 2, :, :].reshape(512, 512)
    bgslice = out[:, 0, :, :].reshape(512, 512)

    meta = img2d.meta()
    # back to image2d
    shrmeta = larcv.ImageMeta(meta.width(), meta.height(), meta.rows(),
                              meta.cols(), meta.min_x(), meta.min_y(), 0)
    trkmeta = larcv.ImageMeta(meta.width(), meta.height(), meta.rows(),
                              meta.cols(), meta.min_x(), meta.min_y(), 1)
    bgmeta = larcv.ImageMeta(meta.width(), meta.height(), meta.rows(),
                             meta.cols(), meta.min_x(), meta.min_y(), 2)
    showercrop = larcv.as_image2d_meta(showerslice, shrmeta)
    shower_img.overlay(showercrop, larcv.Image2D.kOverWrite)
    trackcrop = larcv.as_image2d_meta(trackslice, trkmeta)
    track_img.overlay(trackcrop, larcv.Image2D.kOverWrite)
    bgcrop = larcv.as_image2d_meta(bgslice, bgmeta)
    bg_img.overlay(bgcrop, larcv.Image2D.kOverWrite)

h2d = larcv.as_th2d(shower_img, "shower_img")
c.Clear()
h2d.Draw("colz")
c.Update()
Example #9
                        for ip,bbb in enumerate(bb_v):
                            print "  plane ",ip,": ",bbb.dump()
                        isbad = True
                    bounds.append( (rmin,cmin,rmax,cmax) )
                    
                # we have to get the row, col bounds in the source image
                if isbad:
                    #sign of bad image
                    image_meta.append(None) 
                    target_meta.append(None)
                    continue
                
                source_np[ib,0,:,:] = img_np[2,0,bounds[2][0]:bounds[2][2],bounds[2][1]:bounds[2][3]] # yplane
                target_np[ib,0,:,:] = img_np[0,0,bounds[0][0]:bounds[0][2],bounds[0][1]:bounds[0][3]] # uplane
                # store region of image
                image_meta.append( larcv.ImageMeta( bb_v[2], 512, 832 ) )
                target_meta.append( larcv.ImageMeta( bb_v[0], 512, 832 ) )

            if verbose:
                print "batch using ",len(image_meta)," slots"
        
            # filled batch, make tensors
            source_t = torch.from_numpy( source_np ).to(device=torch.device("cuda:1"))
            target_t = torch.from_numpy( target_np ).to(device=torch.device("cuda:1"))
            tformat = time.time()-tformat
            timing["+++format"] += tformat
            if verbose:
                print "time to slice and fill tensor batch: ",tformat," secs"

            # run model
            trun = time.time()
Example #10
 subrun_vertex = ev_img.subrun()
 event_vertex = ev_img.event()
 index_array = di.query('run == {:2d} & subrun == {:2d} & event == {:2d}'.format(
     run_vertex, subrun_vertex, event_vertex)).index.values

 x_2d = ROOT.Double()
 y_2d = ROOT.Double()
 vertex_index = index_array[0]
 vx, vy, vz = df['v_x'][vertex_index], df['v_y'][vertex_index], df['v_z'][vertex_index]

 whole_img = ev_img.at(plane)
 whole_image = larcv.as_ndarray(whole_img)
 whole_img.reset_origin(0, 8448)
 larcv.Project3D(whole_img.meta(), vx, vy, vz, 0, plane, x_2d, y_2d)

 meta_crop = larcv.ImageMeta(512, 512 * 6, 512, 512, 0, 0, plane)
 meta_origin_x, meta_origin_y = u.Meta_origin_helper(x_2d, y_2d, verbose=0)
 meta_crop.reset_origin(meta_origin_x, meta_origin_y)

 image_vtx = ev_img.at(plane).crop(meta_crop)
 img_vtx = larcv.as_ndarray(image_vtx)
 img_vtx = np.where(img_vtx < 10, 0, img_vtx)
 img_vtx = np.where(img_vtx > 500, 500, img_vtx)

 if entry <= training_number:
     image2d_array_output = io_output.get_data(larcv.kProductImage2D, 'image2d_binary')
     image_as_2d = larcv.as_image2d(img_vtx)
     image2d_array_output.Append(image_as_2d)
     io_output.set_id(100, 1, event_out)
     io_output.save_entry()
     event_out += 1
Example #11
    # get data
    #source,target1,target2,flow1,flow2,visi1,visi2,fvisi1,fvisi2,Wminx,Uminx,Vminx,srcmeta = prep_data( iovalid, "valid", batchsize_valid,
    #                                                                                                    IMAGE_WIDTH, IMAGE_HEIGHT, ADC_THRESH, DEVICE )
    #source,target1,target2,flow1,flow2,visi1,visi2,fvisi1,fvisi2,Wminx,Uminx,Vminx,srcmeta = prep_data2( io, ientry, batchsize_valid,
    #                                                                                                     IMAGE_WIDTH, IMAGE_HEIGHT, ADC_THRESH, DEVICE )
    source, target1, target2, flow1, flow2, visi1, visi2, fvisi1, fvisi2, Wminx, Uminx, Vminx, srcmeta = prep_data3(
        ioserver, batchsize_valid, IMAGE_WIDTH, IMAGE_HEIGHT, ADC_THRESH,
        DEVICE)

    # make imagemeta for source
    srcmetas = []
    targetumetas = []
    targetvmetas = []
    for b in xrange(batchsize_valid):
        meta = larcv.ImageMeta(srcmeta[b, 2, 0, 0], srcmeta[b, 2, 0, 1],
                               srcmeta[b, 2, 0, 2], srcmeta[b, 2, 0, 3],
                               IMAGE_HEIGHT, IMAGE_WIDTH, 2)
        srcmetas.append(meta)
        meta = larcv.ImageMeta(srcmeta[b, 0, 0, 0], srcmeta[b, 0, 0, 1],
                               srcmeta[b, 0, 0, 2], srcmeta[b, 0, 0, 3],
                               IMAGE_HEIGHT, IMAGE_WIDTH, 2)
        targetumetas.append(meta)
        meta = larcv.ImageMeta(srcmeta[b, 1, 0, 0], srcmeta[b, 1, 0, 1],
                               srcmeta[b, 1, 0, 2], srcmeta[b, 1, 0, 3],
                               IMAGE_HEIGHT, IMAGE_WIDTH, 2)
        targetvmetas.append(meta)

    # check image2d input
    img_adc = larcv.as_image2d_meta(
        source[0, 0, :, :].detach().cpu().numpy().transpose((1, 0)),
        srcmetas[0])
Example #12
o.set_out_file(OUT_FNAME)

if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)

p = larcv.VtxInRegion()
p.set_verbosity(2)
cfg = larcv.CreatePSetFromFile(sys.argv[1], "VtxInRegion")
p.configure(cfg)

x = []
y = []
for idx1 in xrange(0, 1000, 10):
    for idx2 in xrange(-100, 100, 2):
        event_roi1 = o.get_data(larcv.kProductROI, "vtxinregion")
        bb1 = larcv.ImageMeta(2, 2, 2, 2, 2, 2, 0)  #doesn't matter
        roi1 = larcv.ROI()
        roi1.Type(2)
        roi1.Position(0, idx2, idx1, 0)
        roi1.AppendBB(bb1)
        event_roi1.clear()
        event_roi1.Append(roi1)

        if p.process(o):
            x.append(idx1)
            y.append(idx2)

import numpy as np
x = np.array(x)
y = np.array(y)
import matplotlib.pyplot as plt
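
The example stops right after importing matplotlib; a minimal continuation sketch, assuming the accepted (idx1, idx2) grid points are simply drawn as a scatter plot (this is not part of the original snippet):

plt.scatter(x, y, s=4)  # accepted points: idx1 on the x axis, idx2 on the y axis
plt.xlabel("idx1")
plt.ylabel("idx2")
plt.show()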