    def getbatch(self, batchsize):
        self.io.batch_process(batchsize)
        time.sleep(0.1)
        itry = 0
        while self.io.thread_running() and itry < 100:
            time.sleep(0.01)
            itry += 1
        if itry >= 100:
            raise RuntimeError("Batch Loader timed out")

        # fill SegData object
        data = SegData()
        dimv = self.io.dim()  # c++ std vector through ROOT bindings
        self.dim = (dimv[0], dimv[1], dimv[2], dimv[3])
        self.dim3 = (dimv[0], dimv[2], dimv[3])

        # numpy arrays
        data.np_images = np.zeros(self.dim, dtype=np.float32)
        data.np_labels = np.zeros(self.dim3, dtype=np.int)
        data.np_weights = np.zeros(self.dim3, dtype=np.float32)
        data.np_images[:] = larcv.as_ndarray(self.io.data()).reshape(
            self.dim)[:]
        data.np_labels[:] = larcv.as_ndarray(self.io.labels()).reshape(
            self.dim3)[:]
        data.np_weights[:] = larcv.as_ndarray(self.io.weights()).reshape(
            self.dim3)[:]
        data.np_weights *= 100000.0
        data.np_labels[:] += -1

        print "check: unique labels=", np.unique(data.np_labels)

        # adjust adc values, threshold, cap
        data.np_images *= 0.83  # scaled to be closer to EXTBNB
        threshold = np.random.rand() * 7.0 + 0.0  # random ADC threshold in [0, 7)
        for ibatch in range(self.dim[0]):
            lx = data.np_labels[ibatch, :]
            lw = data.np_weights[ibatch, :]
            x = data.np_images[ibatch, 0, :]
            lx[x < threshold] = 0  # lx is a view, so this zeroes np_labels in place
            #lw[lx==3] *= 0.1 # mod noise weights

        data.np_images[data.np_images < threshold] = 0.0
        data.np_images[data.np_images > (500.0 + threshold)] = 500.0 + threshold

        # pytorch tensors
        data.images = torch.from_numpy(data.np_images)
        data.labels = torch.from_numpy(data.np_labels)
        data.weight = torch.from_numpy(data.np_weights)
        #if GPUMODE:
        #    data.images.cuda()
        #    data.labels.cuda(async=False)
        #    data.weight.cuda(async=False)

        # debug values
        #print "max label: ",np.max(data.labels)
        #print "min label: ",np.min(data.labels)

        return data
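A minimal usage sketch for this getbatch method; `loader` stands for an instance of the (unshown) class that owns it, with its batch-filler `io` already configured, and the model/criterion lines are illustrative placeholders:

# Usage sketch: `loader` is an assumed instance of the class owning getbatch()
batch = loader.getbatch(4)
print(batch.images.shape)   # (batch, channel, height, width)
print(batch.labels.shape)   # (batch, height, width)
print(batch.weight.shape)   # (batch, height, width)
# The tensors can then feed a segmentation network, e.g. (placeholders):
#   scores = model(batch.images)
#   loss = criterion(scores, batch.labels, batch.weight)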
Example #2
def show_event(entry=-1, plane=0):
    # Create TChain for data image
    chain_image2d = ROOT.TChain('image2d_data_tree')
    chain_image2d.AddFile('data/test_10k.root')
    # Create TChain for label image
    chain_label2d = ROOT.TChain('image2d_segment_tree')
    chain_label2d.AddFile('data/test_10k.root')

    if entry < 0:
        entry = np.random.randint(0, chain_label2d.GetEntries())

    chain_label2d.GetEntry(entry)
    chain_image2d.GetEntry(entry)

    # Let's grab a specific projection (1st one)
    image2d = larcv.as_ndarray(
        chain_image2d.image2d_data_branch.as_vector()[plane])
    label2d = larcv.as_ndarray(
        chain_label2d.image2d_segment_branch.as_vector()[plane])

    # Get image range to focus
    #xlim, ylim = get_view_range(image2d)

    # Dump images
    #fig, (ax0,ax1) = plt.subplots(1, 2, figsize=(18,12), facecolor='w')
    #ax0.imshow(image2d, interpolation='none', cmap='jet', origin='lower')
    #ax1.imshow(label2d, interpolation='none', cmap='jet', origin='lower',vmin=0., vmax=3.1)
    #ax0.set_title('Data',fontsize=20,fontname='Georgia',fontweight='bold')
    #ax0.set_xlim(xlim)
    #ax0.set_ylim(ylim)
    #ax1.set_title('Label',fontsize=20,fontname='Georgia',fontweight='bold')
    #ax1.set_xlim(xlim)
    #ax1.set_ylim(ylim)

    return (np.array(image2d), np.array(label2d))
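A usage sketch that reproduces the commented-out display above with matplotlib; it assumes data/test_10k.root is present and that ROOT and larcv are importable:

# Usage sketch: display the arrays returned by show_event
import matplotlib.pyplot as plt

image2d, label2d = show_event(entry=5, plane=0)
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(18, 12), facecolor='w')
ax0.imshow(image2d, interpolation='none', cmap='jet', origin='lower')
ax0.set_title('Data')
ax1.imshow(label2d, interpolation='none', cmap='jet', origin='lower', vmin=0., vmax=3.1)
ax1.set_title('Label')
plt.show()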
Example #3
    def visualize( self, larlite_io, larcv_io, rawdigit_io ):
        
        event_images    = larcv_io.get_data( larcv.kProductImage2D, self.image_producer )
        event_hitmarker = larcv_io.get_data( larcv.kProductImage2D, self.hit_producer )
        event_rois   = larcv_io.get_data( larcv.kProductROI, self.roi_producer )
        lcv_imgs = event_images.Image2DArray()
        lcv_hits = event_hitmarker.Image2DArray()

        planes = []
        out_images = larcv.EventImage2D()

        for iimg,lcv_img in enumerate(event_images.Image2DArray()):
            planes.append(iimg)
            meta = lcv_img.meta()
            img_ang = larcv.as_ndarray(lcv_img)
            img_hit = larcv.as_ndarray(lcv_hits.at(iimg))
            
            # make two images, dx component, dy component
            img_dx = np.zeros( img_ang.shape, dtype=np.float32 )
            img_dy = np.zeros( img_ang.shape, dtype=np.float32 )
            
            hits = np.argwhere( img_hit>0.1 )
            for hit in hits:
                img_dx[hit[0],hit[1]] = np.cos( img_ang[hit[0],hit[1]] )*200.0
                img_dy[hit[0],hit[1]] = np.sin( img_ang[hit[0],hit[1]] )*100.0 + 100.0
            
            lcv_dx = larcv.as_image2d( img_dx, meta )
            lcv_dy = larcv.as_image2d( img_dy, meta )

            out_images.Append( lcv_dx )
            out_images.Append( lcv_dy )
            

        pytpcdata = TPCdataPlottable( self.image_producer, out_images.Image2DArray(), event_rois.ROIArray(), planes )
        return pytpcdata
Example #4
    def load_bbox(self, image_id):
        info = self.image_info[image_id]
        pdgs = info['pdgs']
        bbs = info['bbs']
        count = len(pdgs)
        mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8)
        assert (len(bbs) == len(pdgs)), 'bbs length does not match pdgs length'

        image, image_mask, _ = self.load_this_entry(image_id)
        image_meta = image.meta()
        image.binary_threshold(10, 0, 1)
        img_ori_np = larcv.as_ndarray(image)

        y = set(img_ori_np.flatten())

        image_mask_np = larcv.as_ndarray(image_mask)

        boxes = np.zeros((count, 4, 2))

        for i in xrange(count):
            pdg = pdgs[i]
            bb = bbs[i]

            bb_tl = bb[0]
            bb_tr = bb[1]
            bb_bl = bb[2]
            bb_br = bb[3]

            new_tl = np.array([
                abs(image_meta.tl().x - bb_tl[0]),
                abs((image_meta.tl().y - bb_tl[1]) / 6)
            ])
            new_tr = np.array([
                abs(image_meta.tl().x - bb_tr[0]),
                abs((image_meta.tl().y - bb_tr[1]) / 6)
            ])
            new_bl = np.array([
                abs(image_meta.tl().x - bb_bl[0]),
                abs((image_meta.tl().y - bb_bl[1]) / 6)
            ])
            new_br = np.array([
                abs(image_meta.tl().x - bb_br[0]),
                abs((image_meta.tl().y - bb_br[1]) / 6)
            ])

            #if pdg==11: #this is introduced by some bug when generating bbox
            new_tl[1] = 512 - new_tl[1]
            new_tr[1] = 512 - new_tr[1]
            new_bl[1] = 512 - new_bl[1]
            new_br[1] = 512 - new_br[1]

            boxes[i] = np.array([new_tl, new_tr, new_bl, new_br])

        return boxes
Example #5
def get_entry(entry, tepath):
    # image
    chain_image2d = R.TChain("image2d_data_tree")
    chain_image2d.AddFile(tepath)
    chain_image2d.GetEntry(entry)
    cpp_image2d = chain_image2d.image2d_data_branch.as_vector().front()
    # label
    chain_label2d = R.TChain("image2d_segment_tree")
    chain_label2d.AddFile(tepath)
    chain_label2d.GetEntry(entry)
    cpp_label2d = chain_label2d.image2d_segment_branch.as_vector().front()
    return (np.array(larcv.as_ndarray(cpp_image2d)),
            np.array(larcv.as_ndarray(cpp_label2d)))
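A hedged usage sketch for get_entry; the file path is illustrative and `R` is assumed to be bound to the ROOT module at module scope (e.g. import ROOT as R), as in the other snippets on this page:

# Usage sketch: the file path is illustrative
img_np, lbl_np = get_entry(0, 'data/test_10k.root')
print(img_np.shape)
print(lbl_np.shape)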
Example #6
    def load_image(self, image_id):
        if (verbose): sys.stdout.write("%s \n" % '>>>>load_this_entry in load_image')
        #print '>>>>load_this_entry in load_image'
        if (verbose): sys.stdout.flush()
        image, _, _ = self.load_this_entry(image_id)
        img_np_ = larcv.as_ndarray(image)
        #print 'before thresholding, sum is ', np.sum(img_np_)
        #print img_np_[100,:]
        image.threshold(10, 0)  # threshold value here
        img_np = larcv.as_ndarray(image)
        #print 'after thresholding, sum is ', np.sum(img_np)
        #print img_np[100,:]
        img_np = img_np.reshape(512, 512, 1)
        return img_np.copy()
Example #7
    def visualize(self, larlite_io, larcv_io, rawdigit_io):
        event_imgs = larcv_io.get_data(larcv.kProductImage2D, self.producer)
        event_hits = larcv_io.get_data(larcv.kProductImage2D,
                                       self.hit_producer)

        lcv_imgs = event_imgs.Image2DArray()
        lcv_hits = event_hits.Image2DArray()

        pixel_vecs = []  # output container

        for iimg in xrange(0, lcv_imgs.size()):
            lcv_img = lcv_imgs.at(iimg)
            lcv_hit = lcv_hits.at(iimg)

            meta = lcv_img.meta()

            angimg = larcv.as_ndarray(lcv_img)
            hitpx = larcv.as_ndarray(lcv_hit)

            hits = np.argwhere(hitpx > 0.1)

            print "number of hits: ", len(hits)
            vec_data_x = np.zeros(2 * len(hits))  # we make pair of points for vec
            vec_data_y = np.zeros(2 * len(hits))  # we make pair of points for vec
            for i, hit in enumerate(hits):
                # set origin
                x = meta.pos_x(hit[0])
                y = meta.pos_y(hit[1])

                vec_data_x[2 * i] = x
                vec_data_y[2 * i] = y

                ang = angimg[hit[0], hit[1]]
                dx = np.cos(ang) * meta.pixel_width()
                dy = np.sin(ang) * meta.pixel_height()

                vec_data_x[2 * i + 1] = x + dx
                vec_data_y[2 * i + 1] = y - dy  # (-) because origin of y is top corner

            plot = PlotDataItem(x=vec_data_x,
                                y=vec_data_y,
                                pen=(255, 255, 255, 100),
                                connect='pairs')
            pixel_vecs.append(plot)

        return pixel_vecs
Example #8
def visualize_larcv_image2d( image2d, minz=0.0, maxz=200.0, reverse_ticks=False ):
    meta = image2d.meta()
    imgnp = np.transpose( larcv.as_ndarray( image2d ), (1,0) )
    if meta.plane() in [0,1]:
        imgnp = imgnp[:,0:2400]
        maxx = 2400.0
    else:
        maxx = meta.max_x()
    print("image shape: ",imgnp.shape," maxx=",maxx)
    xaxis = np.linspace( meta.min_x(), maxx, endpoint=False, num=int(maxx/meta.pixel_width()) )
    yaxis = np.linspace( meta.min_y(), meta.max_y(), endpoint=False, num=meta.rows() )
    print(type(imgnp),type(xaxis),type(yaxis))

    imgnp[ imgnp<minz ] = 0
    imgnp[ imgnp>maxz ] = maxz

    if reverse_ticks:
        imgnp = np.flip( imgnp, axis=0 )

    heatmap = {
        #"type":"heatmapgl",
        "type":"heatmap",
        "z":imgnp,
        "x":xaxis,
        "y":yaxis,
        "colorscale":"Jet",
        }
    return heatmap
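The returned dictionary is a plotly-style heatmap trace; a minimal rendering sketch, assuming plotly is installed and `image2d` is a larcv::Image2D pulled from an EventImage2D as in the other examples:

# Usage sketch: `image2d` is an assumed larcv::Image2D instance
import plotly.graph_objs as go

heatmap = visualize_larcv_image2d(image2d, minz=0.0, maxz=200.0)
fig = go.Figure(data=[go.Heatmap(z=heatmap["z"], x=heatmap["x"], y=heatmap["y"],
                                 colorscale=heatmap["colorscale"])])
fig.show()  # plotly >= 4; older versions would use plotly.offline.plot(fig)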
Example #9
def load_croppedset_sparse_dualflow_nomc(io,
                                         producer="croppedadc",
                                         threshold=10.0):

    # get crop set
    ev_crops = io.get_data(larcv.kProductImage2D, producer)
    crop_v = ev_crops.Image2DArray()
    ncrops = crop_v.size()
    nsets = ncrops // 3  # three crops (one per plane) per set

    print "Number of sets=", nsets, " ncrops=", ncrops

    thresh_v = std.vector('float')(3, threshold)
    cuton_v = std.vector('int')(3, 1)

    # we are making a batch. collect the sparse arrays
    data = {"pixadc": []}
    for iset in xrange(nsets):
        # get instance, convert to numpy array, nfeatures per flow
        sparsedata = larcv.SparseImage(crop_v, iset * 3, iset * 3 + 2,
                                       thresh_v, cuton_v)
        sparse_np = larcv.as_ndarray(sparsedata, larcv.msg.kNORMAL)
        data["pixadc"].append(sparse_np)
        #print "nfeatures: ",nfeatures
    return data
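A usage sketch for the cropped-set loader; `io` is assumed to be a larcv IOManager with an entry already read in and Image2D crops stored under the "croppedadc" producer:

# Usage sketch: `io` is an assumed, already-initialized larcv IOManager
from ROOT import std          # provides the std.vector used above
from larcv import larcv

data = load_croppedset_sparse_dualflow_nomc(io, producer="croppedadc", threshold=10.0)
for iset, sparse_np in enumerate(data["pixadc"]):
    print("set %d: sparse array shape %s" % (iset, str(sparse_np.shape)))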
Example #10
def convert_image2d( producername, evimage2d ):
    # input
    # -----
    # evimage2d: larcv::EventImage2D instance
    #
    # output
    # ------
    # dictionary with data in it

    import ROOT
    from larcv import larcv    

    meta  = evimage2d.Image2DArray().front().meta()
    nimgs = evimage2d.Image2DArray().size()
    imgdata_np = np.zeros( (nimgs,meta.cols(),meta.rows()), dtype=np.float32 )
    imgmeta_np = np.zeros( (nimgs,1,7), dtype=np.float )
        
    for i in xrange( evimage2d.Image2DArray().size() ):
        img = evimage2d.Image2DArray().at(i)
        imgdata_np[i,:,:] = larcv.as_ndarray( img  )
        meta = img.meta()            
        imgmeta_np[i,0,0] = meta.cols()
        imgmeta_np[i,0,1] = meta.rows()
        imgmeta_np[i,0,2] = meta.min_x()
        imgmeta_np[i,0,3] = meta.min_y()
        imgmeta_np[i,0,4] = meta.max_x()
        imgmeta_np[i,0,5] = meta.max_y()
        imgmeta_np[i,0,6] = meta.plane()

    data = {}
    data["image2d_%s"%(producername)]   = imgdata_np
    data["imagemeta_%s"%(producername)] = imgmeta_np
    print "convert image2d: producer=",producername
    return data
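A usage sketch for convert_image2d; `io` and the "wire" producer name are assumptions:

# Usage sketch: `io` is an assumed larcv IOManager with an entry read in
from larcv import larcv

ev_image2d = io.get_data(larcv.kProductImage2D, "wire")
out = convert_image2d("wire", ev_image2d)
print(out["image2d_wire"].shape)    # (nimgs, cols, rows)
print(out["imagemeta_wire"].shape)  # (nimgs, 1, 7)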
Example #11
    def __getitem__(self,index):
        
        # have io read entry
        self.io.read_entry( index )
        
        # now we convert and organize the data products
        numpy_arrays = OrderedDict()
        self.rse = None
        for ktype,producer_name in self.products:
            try:
                ev_data = self.io.get_data( ktype, producer_name )
            except:
                raise RuntimeError("could not retrieve data product for product_id=%d and producername=%s"%(ktype,producer_name))
            if self.rse is None:
                self.rse = ( ev_data.run(), ev_data.subrun(), ev_data.event() )
            

            # handle different data product types
            if ktype==larcv.kProductImage2D:
                img_v = ev_data.Image2DArray()
                img_np = np.zeros( (img_v.size(),img_v[0].meta().cols(),img_v[0].meta().rows()), dtype=np.float32 )
                meta_v = []
                for iimg in range(img_v.size()):
                    img_np[iimg,:] = larcv.as_ndarray( img_v[iimg] )[:]
                    meta_v.append( img_v[iimg].meta() )
                self.image2d_meta_dict[(ktype,producer_name)] = meta_v
                numpy_arrays[(ktype,producer_name)] = img_np
            else:
                raise RuntimeError("product,\"{}\", not yet supported. please support it by adding it here.".format(ktype))

        output = []
        for k,v in numpy_arrays.items():
            output.append(v)

        return output
Example #12
def convert_chstatus( producername, evchstatus ):
    import ROOT
    from larcv import larcv

    status_np = larcv.as_ndarray( evchstatus )
    data = {"chstatus_%s"%(producername):status_np}
    print "convert chstatus: producer=",producername
    return data
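A matching usage sketch for convert_chstatus; again `io` and the "wire" producer name are assumptions:

# Usage sketch: `io` is an assumed larcv IOManager with an entry read in
from larcv import larcv

ev_chstatus = io.get_data(larcv.kProductChStatus, "wire")
out = convert_chstatus("wire", ev_chstatus)
print(out["chstatus_wire"].shape)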
Example #13
def parse_tensor3d(event_tensor3d):
    """
    A function to retrieve larcv::EventSparseTensor3D as a numpy array
    Args:
        event_tensor3d (larcv::EventSparseTensor3D): larcv C++ object for a 3d sparse tensor object
    Return:
        a numpy array of a dense 3d tensor object
    """
    return np.array(larcv.as_ndarray(event_tensor3d))
Example #14
def Get_crop_image(plane, x_2d, y_2d, ev_img):
    meta_crop = larcv.ImageMeta(512, 512 * 6, 512, 512, 0, 8448, plane)
    meta_origin_x, meta_origin_y = meta_origin_helper(x_2d, y_2d, verbose=1)
    meta_crop.reset_origin(meta_origin_x, meta_origin_y)
    img_vtx = ev_img.at(plane).crop(meta_crop)
    img_vtx = larcv.as_ndarray(img_vtx)
    img_vtx = np.where(img_vtx < 10, 0, img_vtx)
    img_vtx = np.where(img_vtx > 500, 500, img_vtx)
    return img_vtx
Example #15
    def visualize(self, larlite_io, larcv_io, rawdigit_io):
        event_imgs = larcv_io.get_data(larcv.kProductImage2D, self.producer)

        lcv_imgs = event_imgs.Image2DArray()

        cluster_vecs = []  # output container

        for iimg in xrange(0, lcv_imgs.size()):
            lcv_img = lcv_imgs.at(iimg)

            meta = lcv_img.meta()
            plane = meta.plane()

            img = larcv.as_ndarray(lcv_img)

            img += 0.1  # to help with float to int rounding
            maxid = int(np.amax(img))
            print "number of clusters: ", type(maxid)

            for ic in xrange(1, maxid):  # skip nothing label
                hits = np.argwhere(np.logical_and(img > (ic), img < ic + 1))
                nhits = len(hits)
                if nhits == 0:
                    continue
                print "clusterid=", ic, " number of hits=", nhits
                x = np.zeros(len(hits))
                y = np.zeros(len(hits))
                for ihit in xrange(0, len(hits)):

                    x[ihit] = meta.pos_x(hits[ihit][0])
                    y[ihit] = meta.pos_y(hits[ihit][1])

                if ic == 1:
                    # background
                    color = (100, 100, 100, 75)
                    plot = PlotDataItem(x=x,
                                        y=y,
                                        pen=None,
                                        symbolBrush=pg.mkBrush(color=color),
                                        symbol='o',
                                        symbolPen=pg.mkPen(color=color,
                                                           width=0.0),
                                        width=1.0)
                else:
                    color = VisDBScanClusters.COLORS[ic %
                                                     VisDBScanClusters.NCOLORS]
                    plot = PlotDataItem(x=x,
                                        y=y,
                                        pen=pg.mkPen(color=color, width=2),
                                        symbolBrush=pg.mkBrush(color=color),
                                        symbol='o',
                                        symbolPen=pg.mkPen(color=color,
                                                           width=0.0))

                cluster_vecs.append(plot)
        print "number of cluster plots: ", len(cluster_vecs)
        return cluster_vecs
Example #16
def parse_tensor3d(event_tensor3d):
    """
    A function to retrieve larcv::EventSparseTensor3D as a numpy array
    Args:
        event_tensor3d (larcv::EventSparseTensor3D): larcv C++ object for a 3d sparse tensor object
    Return:
        a numpy array of a dense 3d tensor object
    """
    return np.array(larcv.as_ndarray(event_tensor3d))
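A usage sketch for parse_tensor3d, assuming a larcv2-style IOManager and a sparse3d producer named "data" (file name and producer are illustrative):

# Usage sketch: file name and producer name are illustrative
from larcv import larcv

io = larcv.IOManager(larcv.IOManager.kREAD, "IOIn")
io.add_in_file("sparse3d_example.root")
io.initialize()
io.read_entry(0)
event_tensor3d = io.get_data("sparse3d", "data")
dense = parse_tensor3d(event_tensor3d)
print(dense.shape)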
Example #17
    def getbatch(self, batchsize):
        self.io.batch_process(batchsize)
        time.sleep(0.1)
        itry = 0
        while self.io.thread_running() and itry < 100:
            time.sleep(0.01)
            itry += 1
        if itry >= 100:
            raise RuntimeError("Batch Loader timed out")

        # fill SegData object
        data = SegData()
        dimv = self.io.dim()  # c++ std vector through ROOT bindings
        self.dim = (dimv[0], dimv[1], dimv[2], dimv[3])
        self.dim3 = (dimv[0], dimv[2], dimv[3])

        # numpy arrays
        data.np_images = np.zeros(self.dim, dtype=np.float32)
        data.np_labels = np.zeros(self.dim3, dtype=np.int)
        data.np_weights = np.zeros(self.dim3, dtype=np.float32)
        data.np_images[:] = larcv.as_ndarray(self.io.data()).reshape(
            self.dim)[:]
        data.np_labels[:] = larcv.as_ndarray(self.io.labels()).reshape(
            self.dim3)[:]
        data.np_weights[:] = larcv.as_ndarray(self.io.weights()).reshape(
            self.dim3)[:]
        data.np_labels[:] += -1

        # pytorch tensors
        data.images = torch.from_numpy(data.np_images)
        data.labels = torch.from_numpy(data.np_labels)
        data.weight = torch.from_numpy(data.np_weights)
        #if GPUMODE:
        #    data.images.cuda()
        #    data.labels.cuda(async=False)
        #    data.weight.cuda(async=False)

        # debug values
        #print "max label: ",np.max(data.labels)
        #print "min label: ",np.min(data.labels)

        return data
Example #18
def showCosSeg(which):
    deg = 8
    counter = np.zeros(deg)
    counter2 = np.zeros(deg)
    d1 = R.TChain('image2d_sbndwire_tree')
    d2 = R.TChain('image2d_sbnd_cosmicseg_tree')
    if which == 1:
        dname3 = 'SSTrain2.root'
        amount = 9000
    elif which == 2:
        dname3 = 'SSTest2.root'
        amount = 8500
    else:
        print('eff')
    fpath3 = '/user/jhenzerling/work/NEUsoft/Modules/SS/Data/' + dname3
    d1.AddFile(fpath3)
    d2.AddFile(fpath3)
    print(dname3)
    for entry in range(amount):
        if entry % 500 == 0:
            print("entry= ", entry)
        d1.GetEntry(entry)
        d2.GetEntry(entry)
        d1b = d1.image2d_sbndwire_branch
        d2b = d2.image2d_sbnd_cosmicseg_branch
        d1v = d1b.as_vector()
        d2v = d2b.as_vector()
        d1i = larcv.as_ndarray(d1v.front())
        d2i = larcv.as_ndarray(d2v.front())
        unique_values, unique_counts = np.unique(d2i, return_counts=True)
        for i in range(deg):
            counter[i] += np.count_nonzero(d2i == i)
            if i in unique_values:
                counter2[i] += 1
        #plt.imshow(d2i,cmap=plt.get_cmap())
        #if entry == 0:
        #	cbar = plt.colorbar(ticks = [0,1,2,3,4,5,6,7])
        #	cbar.set_ticklabels(['0=Background','1=Photon','2=Electron','3=Muon','4=Pi0','5=PiC','6=Proton','7=Other'])
        #plt.savefig(path1 + '/Output/Images/SS/event_%s_GEN.png' % (entry),dpi=1000)
    #plt.show()
    print('partcount all, ', counter)
    print('partcount5, ', counter2)
Example #19
def displayImage(chain,trig,eentry):
	chain.GetEntry(eentry)
	cbv = chain.image2d_sbndwire_branch.as_vector()
	im2d = larcv.as_ndarray(cbv.front())

	plt.title('Event %d Image2D' % (eentry))
	plt.imshow(im2d,cmap=plt.get_cmap())
	plt.savefig(path1 + '/Output/Images/PMULTI/event_%s.png' % (eentry))
	if trig == True:
		plt.show()
		plt.close()
Example #20
def RunImage(t, which, entry):
    deg = 3
    counter = np.zeros(deg)
    d1 = R.TChain('image2d_sbndwire_tree')
    d2 = R.TChain('image2d_sbnd_cosmicseg_tree')
    if which == 1:
        dname3 = 'SSTrain2.root'
        amount = 9000
    elif which == 2:
        dname3 = 'SSTest2.root'
        amount = 8500
    else:
        print('eff')
    print(dname3)
    fpath3 = '/user/jhenzerling/work/NEUsoft/Modules/SS/Data/' + dname3
    d1.AddFile(fpath3)
    d2.AddFile(fpath3)
    d1.GetEntry(entry)
    d2.GetEntry(entry)
    d1b = d1.image2d_sbndwire_branch
    d2b = d2.image2d_sbnd_cosmicseg_branch
    d1v = d1b.as_vector()
    d2v = d2b.as_vector()
    d1i = larcv.as_ndarray(d1v.front())
    d2i = larcv.as_ndarray(d2v.front())
    d1i2 = d1i.reshape((1, 655360))  # feed the wire image to the network, not the label image

    softmax = t.ana(input_data=d1i2)
    #print(softmax[0].argmax(axis=0).shape)
    #print(softmax[0].argmax(axis=1).shape)
    #print(softmax[0].argmax(axis=2).shape)
    #print(softmax[0].argmax(axis=3)[0].shape)
    #print(d2i.shape)
    output = softmax[0].argmax(axis=3)[0]
    fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(24, 8), facecolor='w')
    ax0.imshow(output, cmap=plt.get_cmap())
    ax0.set_title('run-net', fontsize=24)
    ax1.imshow(d2i, cmap=plt.get_cmap())
    ax1.set_title('raw', fontsize=24)
    plt.show()
Example #21
def parse_tensor2d(event_tensor2d):
    """
    A function to retrieve larcv::EventSparseTensor2D as a list of numpy arrays
    Args:
        event_tensor2d (larcv::EventSparseTensor2D): larcv C++ object for a collection of 2d sparse tensor objects
    Return:
        a python list of numpy arrays where each array represent one 2d tensor in dense matrix format
    """
    result = []
    for tensor2d in event_tensor2d.as_vector():
        img = larcv.as_image2d(tensor2d)
        result.append(np.array(larcv.as_ndarray(img)))
    return result
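A corresponding usage sketch for parse_tensor2d; `io` is again an assumed, already-initialized larcv2-style IOManager and the "wire" producer name is illustrative:

# Usage sketch: `io` and the producer name are assumptions
event_tensor2d = io.get_data("sparse2d", "wire")
dense_images = parse_tensor2d(event_tensor2d)
for iproj, arr in enumerate(dense_images):
    print("projection %d: dense shape %s" % (iproj, str(arr.shape)))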
Example #22
    def getbatch(self, batchsize):
        self.io.batch_process(batchsize)
        time.sleep(0.1)
        itry = 0
        while self.io.thread_running() and itry < 100:
            time.sleep(0.01)
            itry += 1
        if itry >= 100:
            raise RuntimeError("Batch Loader timed out")

        # Fill SegData object
        data = SegData()
        dimv = self.io.dim()  # c++ std vector via ROOT bindings
        self.dim = (dimv[0], dimv[1], dimv[2], dimv[3])
        # batch, channel, height, width
        # prediction tensor
        self.dim3 = (dimv[0], dimv[2], dimv[3])
        # batch, height, width
        # target 'truth' tensor

        # Numpy arrays
        data.np_images = np.zeros(self.dim, dtype=np.float32)
        data.np_labels = np.zeros(self.dim3, dtype=np.int)
        data.np_weights = np.zeros(self.dim3, dtype=np.float32)
        data.np_images[:] = larcv.as_ndarray(self.io.data()).reshape(
            self.dim)[:]
        data.np_labels[:] = larcv.as_ndarray(self.io.labels()).reshape(
            self.dim3)[:]
        data.np_weights[:] = larcv.as_ndarray(self.io.weights()).reshape(
            self.dim3)[:]
        data.np_labels[:] += -1

        # Torch tensors
        data.images = torch.from_numpy(data.np_images)
        data.labels = torch.from_numpy(data.np_labels)
        data.weight = torch.from_numpy(data.np_weights)

        return data
Example #23
def showCosSeg2(ted, which, entry):
    deg = 7
    counter = np.zeros(deg)
    d1 = R.TChain('image2d_sbndwire_tree')
    d2 = R.TChain('image2d_sbnd_cosmicseg_tree')
    if which == 1:
        dname3 = 'SSTrain.root'
        amount = 9000
    elif which == 2:
        dname3 = 'SSTest.root'
        amount = 8500
    else:
        print('eff')
    print(dname3)
    fpath3 = '/user/jhenzerling/work/NEUsoft/Modules/SS/Data/' + dname3
    d1.AddFile(fpath3)
    d2.AddFile(fpath3)
    d1.GetEntry(entry)
    d2.GetEntry(entry)
    d1b = d1.image2d_sbndwire_branch
    d2b = d2.image2d_sbnd_cosmicseg_branch
    d1v = d1b.as_vector()
    d2v = d2b.as_vector()
    d1i = larcv.as_ndarray(d1v.front())
    d2i = larcv.as_ndarray(d2v.front())
    if ted == 1:
        plt.imshow(d1i, cmap=plt.get_cmap())
    elif ted == 2:
        plt.imshow(d2i, cmap=plt.get_cmap())
    else:
        print('bruh')
    cbar = plt.colorbar(ticks=[0, 1, 2, 3, 4, 5, 6])
    cbar.set_ticklabels([
        '0=Background', '1=Elec', '2=Muon', '3=Phot', '4=Proton',
        '5=Other(K/PiC)'
    ])
    #plt.savefig(path1 + '/Output/Images/SS2/event_%s_GEN.png' % (entry),dpi=1000)
    plt.show()
Example #24
    def setup(self, bottom, top):
        """
        seems to be a required method for a PythonDataLayer
        """

        # get parameters
        params = eval(self.param_str)
        with open(params['configfile'], 'r') as f:
            self.config = yaml.load(f)

        self.batch_size = self.config["batch_size"]
        self._setupBranches(self.config)

        meanio = larcv.IOManager(larcv.IOManager.kREAD, "IOmean")
        meanio.add_in_file(self.config["meanfile"])
        meanio.initialize()
        mean_evtimg = meanio.get_data(larcv.kProductImage2D, "mean")
        self.nchannels = int(mean_evtimg.Image2DArray().size())
        self.width = int(mean_evtimg.Image2DArray().at(0).meta().cols())
        self.height = int(mean_evtimg.Image2DArray().at(0).meta().rows())
        self.mean_img = np.zeros((self.nchannels, self.width, self.height),
                                 dtype=np.float)
        for ch, img2d in enumerate(mean_evtimg.Image2DArray()):
            self.mean_img[ch, ...] = larcv.as_ndarray(img2d)[...]

        # set the blob sizes I guess
        data_shape = (self.batch_size, self.nchannels, self.width, self.height)
        label_shape = (self.batch_size, )
        eventid_shape = (self.batch_size, 5)
        top[0].reshape(*data_shape)
        top[1].reshape(*label_shape)
        top[2].reshape(*eventid_shape)

        # depending on the run mode, we setup the queue
        self.event_queue = Queue()
        if self.config["run_mode"] in ["sequential", "randomize"]:
            # setup the queue
            self.event_thread = Thread(target=fill_event_queue,
                                       args=(self.ioman, self.mean_img,
                                             self.event_queue,
                                             self.batch_size * 2, self.config))
            self.event_thread.setDaemon(True)
            self.event_thread.start()
        elif self.config["run_mode"] == "selection":
            self.batch_size = 1
        else:
            raise ValueError(
                "unrecognized run_mode. either [sequential,randomize,selection]"
            )
Example #25
File: mpid_data.py Project: ruian1/MPID
    def __getitem__(self, ENTRY):
        # Reading Image

        #print ("open ENTRY @ {}".format(ENTRY))

        self.particle_image_chain.GetEntry(ENTRY)
        self.this_image_cpp_object = self.particle_image_chain.sparse2d_wire_branch
        self.this_image = larcv.as_ndarray(
            self.this_image_cpp_object.as_vector()[self.plane])
        # Image Thresholding
        self.this_image = image_modify(self.this_image)

        #print (self.this_image)
        #print ("sum, ")
        #if (np.sum(self.this_image) < 9000):
        #    ENTRY+

        if self.augment:
            if random.randint(0, 1):
                #if True:
                #if (self.verbose): print ("flipped")
                self.this_image = np.fliplr(self.this_image)
            if random.randint(0, 1):
                #if True:
                #if (self.verbose): print ("transposed")
                self.this_image = self.this_image.transpose(1, 0)
        self.this_image = torch.from_numpy(self.this_image.copy())
        #        self.this_image=torch.tensor(self.this_image, device=self.device).float()

        self.this_image = self.this_image.clone().detach()

        # Reading Truth Info
        self.particle_mctruth_chain.GetEntry(ENTRY)
        self.this_mctruth_cpp_object = self.particle_mctruth_chain.particle_mctruth_branch
        self.this_mctruth = torch.zeros([5])

        for particle in self.this_mctruth_cpp_object.as_vector():
            if (particle.pdg_code() == 11):
                self.this_mctruth[0] = 1
            if (particle.pdg_code() == 22):
                self.this_mctruth[1] = 1
            if (particle.pdg_code() == 13):
                self.this_mctruth[2] = 1
            if (particle.pdg_code() == 211 or particle.pdg_code() == -211):
                self.this_mctruth[3] = 1
            if (particle.pdg_code() == 2212):
                self.this_mctruth[4] = 1

        return (self.this_image, self.this_mctruth)
Example #26
    def process_message(self, frames):
        """ we expect a batch for each plane 
        """

        # remake arrays
        self.shape_dict = {}
        self.meta_dict = {}
        self.rse_dict = {}
        self.image_dict = {}
        parts = len(frames)
        for i in range(0, parts, 3):
            # parse frames
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_comp = frames[i + 2]

            # -- decode meta
            print "meta msg: ", metamsg
            meta = decode_larcv1_metamsg(metamsg)
            rse = metamsg.split(":")[-1]

            # -- array
            if self.decoder == "msgpack":
                x_enc = zlib.decompress(x_comp)
                arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            elif self.decoder == "tmessage":
                print type(x_comp), len(x_comp), x_comp
                tmsg = larcv.Image2DTMessage(x_comp, len(x_comp))
                img = tmsg.decode()
                arr = larcv.as_ndarray(img)
            else:
                raise ValueError("Unrecognized decoder: {}".format(
                    self.decoder))

            key = (name, meta.plane())
            if key not in self.image_dict:
                self.image_dict[key] = {}
                self.meta_dict[key] = {}

            self.image_dict[key] = arr
            self.meta_dict[key] = meta
            self.shape_dict[key] = arr.shape
            self.rse_dict[key] = rse

            print "CaffeLArCV1Worker[{}] received array name=\"{}\" shape={} meta={} rse={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip(), rse)

        return "Thanks!"
Example #27
    def drawObjects(self, view_manager, io_manager, meta):

        image2d_array = io_manager.get_data(self._product_name,
                                            str(self._producerName))
        image2d_array = larcv.EventImage2D.to_image2d(image2d_array)

        self._data_arr = []

        for image2d_plane in image2d_array.image2d_array():
            thisView = view_manager.getViewPorts()[image2d_plane.meta().id()]
            self._data_arr.append(copy.copy(larcv.as_ndarray(image2d_plane).T))

            thisView.drawPlane(self._data_arr[-1])

        return
Example #28
def queue_examples(tfsession, enqueue_op, ph_image, ph_label, ioman, producer,
                   out_shape, planes, randomize):
    """ function which simply loads images in order from IOMan."""
    entry = 0
    num_entries = ioman.get_n_entries()
    vecshape = out_shape[0] * out_shape[1] * out_shape[2]
    while True:
        if not randomize:
            ioman.read_entry(entry)
        else:
            ientry = np.random.randint(0, num_entries)
            ioman.read_entry(ientry)

        # Get image
        event_images = ioman.get_data(larcv.kProductImage2D, producer)
        imgs = event_images.Image2DArray()

        # get label (This is for neutrino vs. cosmic)
        label = np.zeros((1), dtype=np.int32)
        event_rois = ioman.get_data(larcv.kProductROI, producer)
        roi_type = larcv.kROICosmic
        for roi in event_rois.ROIArray():
            if roi.MCSTIndex() != larcv.kINVALID_SHORT:
                continue
            roi_type = roi.Type()
            if roi_type == larcv.kROIUnknown:
                roi_type = larcv.PDG2ROIType(roi.PdgCode())
                break
        label[0] = int(roi_type)

        # fill numpy array
        outimg = np.zeros(out_shape, dtype=np.float32)
        for i, ch in enumerate(planes):
            inimg = larcv.as_ndarray(imgs.at(ch))
            outimg[:, :, i] = inimg

        # push into queue
        print "Enqueueing entry=%d" % (entry), label[0]
        tfsession.run(enqueue_op,
                      feed_dict={
                          ph_image: np.reshape(outimg, (vecshape)),
                          ph_label: label[0]
                      })

        # increment entry
        entry += 1
        if entry >= num_entries:
            entry = 0
Example #29
def load_data_larcv2(io):
    """ example of data loader function. we provide dictionary with numpy arrays (no batch) """
    from larcv import larcv
    import numpy as np

    width = 832
    height = 512
    src_adc_threshold = 10.0

    index = (1, 0)
    products = [
        "source", "targetu", "targetv", "flowy2u", "flowy2v", "visiy2u",
        "visiy2v", "meta"
    ]
    data = {}
    for k in products:
        if k != "meta":
            data[k] = np.zeros((1, width, height), dtype=np.float32)
        else:
            data[k] = np.zeros((3, width, height), dtype=np.float32)

    ev_adc = io.get_data("image2d", "adc")
    ev_flo = io.get_data("image2d", "pixflow")
    ev_vis = io.get_data("image2d", "pixvisi")

    data["source"][0, :, :] = larcv.as_ndarray(
        ev_adc.as_vector()[2]).transpose(1, 0)
    data["targetu"][0, :, :] = larcv.as_ndarray(
        ev_adc.as_vector()[0]).transpose(1, 0)
    data["targetv"][0, :, :] = larcv.as_ndarray(
        ev_adc.as_vector()[1]).transpose(1, 0)

    data["flowy2u"][0, :, :] = larcv.as_ndarray(
        ev_flo.as_vector()[0]).transpose(1, 0)
    data["flowy2v"][0, :, :] = larcv.as_ndarray(
        ev_flo.as_vector()[1]).transpose(1, 0)

    data["visiy2u"][0, :, :] = larcv.as_ndarray(
        ev_vis.as_vector()[0]).transpose(1, 0)
    data["visiy2v"][0, :, :] = larcv.as_ndarray(
        ev_vis.as_vector()[1]).transpose(1, 0)

    for ip in xrange(0, 3):
        data["meta"][ip, 0, 0] = ev_adc.as_vector()[ip].meta().min_x()
        data["meta"][ip, 0, 1] = ev_adc.as_vector()[ip].meta().min_y()
        data["meta"][ip, 0, 2] = ev_adc.as_vector()[ip].meta().max_x()
        data["meta"][ip, 0, 3] = ev_adc.as_vector()[ip].meta().max_y()

    return data
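A usage sketch for load_data_larcv2, assuming a larcv2 IOManager pointed at a file (name illustrative) that contains the "adc", "pixflow" and "pixvisi" image2d producers used above:

# Usage sketch: the input file name is illustrative
from larcv import larcv

io = larcv.IOManager(larcv.IOManager.kREAD, "IOFlow")
io.add_in_file("croppedflow_example.root")
io.initialize()
io.read_entry(0)
data = load_data_larcv2(io)
for name, arr in data.items():
    print("%s: %s" % (name, str(arr.shape)))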
Example #30
def parse_tensor3d(data):
    """
    A function to retrieve larcv::EventSparseTensor3D as a dense numpy array
    Args:
        array of larcv::EventSparseTensor3D
    Return:
        a numpy array of a dense 3d tensor object, last dimension = channels
    """
    np_data = []
    meta = None
    for event_tensor3d in data:
        if meta is None:
            meta = event_tensor3d.meta()
        else:
            assert meta == event_tensor3d.meta()
        np_data.append(np.array(larcv.as_ndarray(event_tensor3d)))
    return np.stack(np_data, axis=-1)
Example #31
def makeEventData( iom, mean_images, index, config ):
    #evtimgs = iom.get_data( larcv.kProductImage2D, "tpc_hires_crop" )
    evtimgs = iom.get_data( larcv.kProductImage2D, "6ch_hires_crop" )
    evtroi  = iom.get_data( larcv.kProductROI, "tpc_hires_crop" )
    roi = evtroi.ROIArray().at(0)
    nchannels = evtimgs.Image2DArray().size()
    img2d_arr = np.zeros( mean_images.shape, dtype=np.float )
    min_thresh = config["imin"]
    max_thresh = config["imax"]
    for n,img2d in enumerate(evtimgs.Image2DArray()):
        # get image as ND array
        imgnd = larcv.as_ndarray( img2d )
        plane = img2d.meta().plane()

        # subtract mean
        imgnd -= mean_images[n,...]

        # subtract imin
        imgnd -= min_thresh[n]

        # zero pixels below threshold
        minmask = imgnd < 0.0
        maxmask = imgnd > max_thresh[n]
        imgnd[minmask] = 0.0
        imgnd[maxmask] = max_thresh[n]

        # copy to event array
        img2d_arr[n,...] = imgnd
        
    evtdata = EventData()
    evtdata.img2d_arr = img2d_arr
    # label
    if roi.Type()==larcv.kROICosmic:
        evtdata.label     = 0
    else:
        evtdata.label     = 1

    # event ID
    evtdata.eventid = np.zeros( (5), dtype=np.int )
    evtdata.eventid[0] = iom.event_id().run()
    evtdata.eventid[1] = iom.event_id().subrun()
    evtdata.eventid[2] = iom.event_id().event()
    evtdata.eventid[3] = -1 # window id goes here eventually
    evtdata.eventid[4] = index

    return evtdata
Example #32
    def setup( self, bottom, top):
        """
        seems to be a required method for a PythonDataLayer
        """

        # get parameters
        params = eval(self.param_str)
        with open(params['configfile'], 'r') as f:
            self.config = yaml.load(f)
            
        self.batch_size = self.config["batch_size"]
        self._setupBranches( self.config )

        meanio = larcv.IOManager( larcv.IOManager.kREAD, "IOmean" )
        meanio.add_in_file( self.config["meanfile"] )
        meanio.initialize()
        mean_evtimg = meanio.get_data( larcv.kProductImage2D, "mean" )
        self.nchannels = int(mean_evtimg.Image2DArray().size())
        self.width    = int(mean_evtimg.Image2DArray().at(0).meta().cols())
        self.height   = int(mean_evtimg.Image2DArray().at(0).meta().rows())
        self.mean_img = np.zeros( ( self.nchannels, self.width, self.height), dtype=np.float )
        for ch,img2d in enumerate(mean_evtimg.Image2DArray()):
            self.mean_img[ch,...] = larcv.as_ndarray( img2d )[...]

        # set the blob sizes I guess
        data_shape  = (self.batch_size, self.nchannels, self.width, self.height ) 
        label_shape = (self.batch_size,)
        eventid_shape = (self.batch_size, 5)
        top[0].reshape( *data_shape )
        top[1].reshape( *label_shape )
        top[2].reshape( *eventid_shape )

        # depending on the run mode, we setup the queue
        self.event_queue = Queue()
        if self.config["run_mode"] in ["sequential","randomize"]:
            # setup the queue
            self.event_thread = Thread( target=fill_event_queue, args=(self.ioman, self.mean_img, self.event_queue, self.batch_size*2, self.config ) )
            self.event_thread.setDaemon(True)
            self.event_thread.start()
        elif self.config["run_mode"]=="selection":
            self.batch_size = 1
        else:
            raise ValueError("unrecognized run_mode. either [sequential,randomize,selection]")
Example #33
File: example.py Project: LArbys/thrumu
    # get image vector (the image2d instances are stored in a std vector
    img_v = event_images.Image2DArray()

    # get the height (rows) and width (cols) of our image, using the first one in the vector
    # by convention our images are all the same size. mostly a restriction due to caffe.
    # notice how image meta data is stored in an ImageMeta class that we retrieve by the meta() method
    rows = img_v.front().meta().rows()
    cols = img_v.front().meta().cols()

    # We are going to output a BGR image, make the container for it
    outimg = np.zeros( (rows,cols,3) )
    
    # loop over the images in the array and put them into the numpy array
    for img in img_v:
        imgnd = larcv.as_ndarray(img) # we convert the Image2D data into a numpy array
        imgnd = np.transpose( imgnd, (1,0) ) # image2d and numpy conventions on row and cols are not the same...
        imgnd = imgnd[::-1,:] # my preference is to have time go from top to bottom, Image2D assumes otherwise, so I reverse the y-axis here (which are the rows)
        # we use the [0,255] BGR color scale. I'm not being careful about normalizing values. But I know that I want MIP tracks to be around 128.
        # the MIP peak was calibrated to be around 1.0 when we made this data.
        outimg[:,:,img.meta().plane()] = imgnd*128

    outname = 'entry_%d.png' % (entry)

    if not hascv:
        mat_display=plt.imshow( outimg )
        mat_display.write_png(outname)
    else:
        # note opencv uses BGR format
        # so B=U plane, G=V plane, R=Y plane
        cv2.imwrite( outname, outimg )
Example #34
treename = "image2d_tpc_tree"
filename = "supera_mc_muminus.root"

# Create TChain
ch = TChain(treename)

# Load file
ch.AddFile(filename)

# Get # entries
print "How many entries?", ch.GetEntries()

# Get entry 0
ch.GetEntry(0)

# EventImage2D object
br = None
exec ("br = ch.%s" % treename.replace("_tree", "_branch"))
print "EventImage2D object pointer:", br

# Let's get actual array of Image2D
img_arr = br.Image2DArray()

# Loop over individual Image2D
for img in img_arr:

    print img
    # Convert to Python ndarray
    print larcv.as_ndarray(img).shape
Example #35
    # get the height (rows) and width (cols) of our image, using the first one in the vector
    # by convention our images are all the same size. mostly a restriction due to caffe.
    # notice how image meta data is stored in an ImageMeta class that we retrieve by the meta() method
    rows = img_v.front().meta().rows()
    cols = img_v.front().meta().cols()

    # We are going to output a BGR image, make the container for it
    outimg = np.zeros( (rows,cols,3) )

    # Now I need to transpose the matrix before tagging the end of the particle tracks                                                                                               

    # loop over the images in the array and put them into the numpy array                                                                                                            
    for img in img_v:

        # we convert the Image2D data into a numpy array                                                                                                                             
        imgnd = larcv.as_ndarray(img)

        # image2d and numpy conventions on row and cols are not the same...                                                                                                          
        imgnd = np.transpose( imgnd, (1,0) )

        # my preference is to have time go from top to bottom, Image2D assumes otherwise, so I reverse the y-axis here (which are the rows)                                          
        imgnd = imgnd[::-1,:]

        # we use the [0,255] BGR color scale. I'm not being careful about normalizing values. But I know that I want MIP tracks to be around 128.                                    
        # the MIP peak was calibrated to be around 1.0 when we made this data.                                                                                                       
        outimg[:,:,img.meta().plane()] = imgnd*128

    # This inverts what I'll do below in order to operate on each of the images.  I'll set each of the outimg third components equal to the imgnds at the end                        
    # when the image is to be displayed                                                                                                                                              
    uplane_imgnd  = outimg[:,:,0]
    vplane_imgnd  = outimg[:,:,1]
Example #36
planeid = array('i',[0])
ttree.Branch("wireid",wireid,"wireid/I")
ttree.Branch("planeid",planeid,"planeid/I")
ttree.Branch("peak",peakmax,"peak/F")

nentries = ioman.get_n_entries()
print "NUM ENTRIES: ",nentries
num_entries = 10000

for entry in range(0,num_entries):
    ioman.read_entry(entry)

    event_images = ioman.get_data(larcv.kProductImage2D,"tpc")
    img0 = event_images.Image2DArray()[0]
    print ioman.event_id().run(),ioman.event_id().subrun(),ioman.event_id().event(),"(",entry,")"
    wfm0 = larcv.as_ndarray(img0)

    x = np.linspace(0,wfm0.shape[1],wfm0.shape[1])
    for img in event_images.Image2DArray():
        wfms = larcv.as_ndarray(img)
        if img.meta().plane()!=1:
            continue

        for w in range(0,wfms.shape[0]):
            y = wfms[w,:]
            yt = y > 40
            inpeak = False
            pmax = -1
            peakcenter = -1
            peaks = []
            for t in range(0,len(y)):
Example #37
File: dump_img.py Project: LArbys/LArCV
img_tree_name='image2d_%s_tree' % IMAGE_PRODUCER
img_br_name='image2d_%s_branch' % IMAGE_PRODUCER
img_ch = TChain(img_tree_name)
img_ch.AddFile(sys.argv[2])

start=0
cutoff=0
if len(sys.argv) > 3:
    cutoff = int(sys.argv[3])
if len(sys.argv) > 4:
    start = int(sys.argv[3])
    cutoff = int(sys.argv[4])

for entry in xrange(img_ch.GetEntries()):
    if entry<start: continue
    img_ch.GetEntry(entry)
    img_br=None
    exec('img_br=img_ch.%s' % img_br_name)
    event_key = img_br.event_key()
    index=0
    for img in img_br.Image2DArray():
        mat=larcv.as_ndarray(img)
        mat_display=plt.imshow(mat)
        mat_display.write_png('%s_plane%d_%d.png' % (event_key,img.meta().plane(),index))
        index+=1
    if cutoff and cutoff <= entry:
        break