def process_received_images(self, wholeview_v, outimg_v):
    """ receive the list of images from the worker

    Stitches per-crop SSNet output images back onto whole-view canvases
    (one shower image and one track image per plane) and appends them to
    the output larcv file.

    wholeview_v: vector of whole-view larcv::Image2D, one per plane;
                 only used here for its size and per-plane metas.
    outimg_v:    per-plane list of crop images; assumed to alternate
                 (shower, track) pairs -- TODO confirm with the sender.
    """
    nplanes = wholeview_v.size()
    # output event containers: one tree for shower scores, one for track scores
    ev_shower = self._outlarcv.\
        get_data(larcv.kProductImage2D, self._ssnet_tree_name+"_shower")
    ev_track = self._outlarcv.\
        get_data(larcv.kProductImage2D, self._ssnet_tree_name+"_track")
    #ev_bg = self._outlarcv.\
    #    get_data(larcv.kProductImage2D,"ssnet_background")
    for p in xrange(nplanes):
        # blank whole-view canvases sharing this plane's meta
        showerimg = larcv.Image2D(wholeview_v.at(p).meta())
        #bgimg = larcv.Image2D( wholeview_v.at(p).meta() )
        trackimg = larcv.Image2D(wholeview_v.at(p).meta())
        showerimg.paint(0)
        #bgimg.paint(0)
        trackimg.paint(0)
        # crops arrive in (shower, track) pairs (Python 2 integer division)
        nimgsets = len(outimg_v[p]) / 2
        for iimgset in xrange(nimgsets):
            #bg = outimg_v[p][iimgset*3+0]
            shr = outimg_v[p][iimgset * 2 + 0]
            trk = outimg_v[p][iimgset * 2 + 1]
            # kOverWrite: later crops replace earlier values where they overlap
            showerimg.overlay(shr, larcv.Image2D.kOverWrite)
            trackimg.overlay(trk, larcv.Image2D.kOverWrite)
            #bgimg.overlay( bg, larcv.Image2D.kOverWrite)
        ev_shower.Append(showerimg)
        ev_track.Append(trackimg)
def process_received_images(self, wholeview_v, ev_chstatus, outimg_v, tick_backwards):
    """ receive the list of images from the worker

    Stitches the cropped infill outputs back into one whole-view image per
    plane, averaging pixels covered by more than one crop, and appends the
    result (plus the input ADC image) to the output larcv file.
    """
    # per-plane mcc9 rescaling factors, applied only for tick-backwards data
    plane_scale = {0: 53.0 / 43.0, 1: 52.0 / 43.0, 2: 59.0 / 48.0}
    n_planes = wholeview_v.size()
    ev_infill = self._outlarcv.\
        get_data(larcv.kProductImage2D, self._infill_tree_name)
    ev_input = self._outlarcv.\
        get_data(larcv.kProductImage2D, self._adc_producer)
    for plane in xrange(n_planes):
        plane_meta = wholeview_v.at(plane).meta()
        # final stitched whole-view image for this plane
        stitched = larcv.Image2D(plane_meta)
        stitched.paint(0)
        # per-pixel count of contributing crops, used later for averaging
        coverage = larcv.Image2D(plane_meta)
        coverage.paint(0)
        stitched_meta = stitched.meta()
        # if tick-backwards, rescale the crop pixels to mcc9 values
        if tick_backwards:
            scale = plane_scale.get(plane)
            if scale is not None:
                for crop in outimg_v[plane]:
                    ublarcvapp.InfillImageStitcher().PixelScaling(crop, scale)
        # paste every crop onto the whole-view canvas
        for crop in outimg_v[plane]:
            ublarcvapp.InfillImageStitcher().Croploop(
                stitched_meta, crop, stitched, coverage)
        # build overlay and average regions where crops overlapped
        ublarcvapp.InfillImageStitcher().Overlayloop(
            plane, stitched_meta, stitched, coverage, wholeview_v, ev_chstatus)
        ev_infill.Append(stitched)
        ev_input.Append(wholeview_v.at(plane))
# Test-driver chunk: writes NUM_EVENT synthetic events through a CropROI
# processor.  MSG_LEVEL, OUT_FNAME, ERROR_WRITE_INIT, NUM_EVENT and sys are
# defined elsewhere in this file.
from larcv import larcv
o = larcv.IOManager(larcv.IOManager.kWRITE)
o.reset()
o.set_verbosity(MSG_LEVEL)
o.set_out_file(OUT_FNAME)
p = larcv.CropROI()
# processor config comes from the PSet file given on the command line
cfg = larcv.CreatePSetFromFile(sys.argv[1], "CropROI")
p.configure(cfg)
p.initialize()
if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)
for idx in xrange(NUM_EVENT):
    # 10x10 test image; pixel value cycles 0,1,2 by flat index
    img = larcv.Image2D(10, 10)
    for x in xrange(img.as_vector().size()):
        img.set_pixel(x, x % 3)
    # same image stored under two producers: the one to crop and the target
    event_image1 = o.get_data(larcv.kProductImage2D, "original")
    event_image1.Append(img)
    event_image2 = o.get_data(larcv.kProductImage2D, "target")
    event_image2.Append(img)
    roi = larcv.ROI()
    #ImageMeta(const double width=0., const double height=0.,
    #          const size_t row_count=0., const size_t col_count=0,
    #          const double origin_x=0., const double origin_y=0.,
    #          const PlaneID_t plane=::larcv::kINVALID_PLANE)
    # 3x3 bounding box at origin (2,-2) on plane 0
    meta = larcv.ImageMeta(3, 3, 3, 3, 2, -2, 0)
    roi.AppendBB(meta)
# Mid-function chunk: records the (run,subrun,event) id, prepares the
# stitcher and output vector, and copies the input ADC planes into the
# numpy batch array.  ev_img, img_v, img_np, stitch, stitcher, rt,
# batch_size, timing, talloc, verbose and nimgs come from enclosing scope.
runid = ev_img.run()
subrunid = ev_img.subrun()
eventid = ev_img.event()
print "ADC Input image. Nimgs=", img_v.size(), " (rse)=", (runid, subrunid, eventid)
# setup stitcher
if stitch:
    stitcher.setupEvent(img_v)
# output stitched images
out_v = rt.std.vector("larcv::Image2D")()
for i in range(img_v.size()):
    # transpose (cols,rows) -> (rows,cols) to match the network layout
    # -- assumed; TODO confirm against the model's expected input order
    img_np[i, 0, :, :] = larcv.as_ndarray(img_v[i]).transpose((1, 0))
    # blank output image with the same meta as the input plane
    out = larcv.Image2D(img_v[i].meta())
    out.paint(0.0)
    out_v.push_back(out)
# fill source and target images
crop_np = np.zeros((batch_size, 3, 512, 832), dtype=np.float32)
#target_np = np.zeros( (batch_size,1,512,832), dtype=np.float32 )
talloc = time.time() - talloc
timing["++alloc_arrays"] += talloc
if verbose:
    print "time to allocate memory (and copy) for numpy arrays: ", talloc, "secs"
nsets = nimgs
# loop over images from cropper
import os,sys
import ROOT
import numpy as np
from larcv import larcv

print larcv.Image2D

# TESTS THE ELEMENT-WISE MULTIPLICATION FEATURE (Image2D::eltwise).
# Note: despite the original label, this is a Hadamard (element-wise)
# product -- both operands are 6x7, so matrix multiplication would not
# even be defined here.
a = np.random.rand(6,7)
b = np.random.rand(6,7)
# Image2D(rows, cols) counterparts of the numpy arrays
aI = larcv.Image2D( a.shape[0], a.shape[1] )
bI = larcv.Image2D( b.shape[0], b.shape[1] )
arows = a.shape[0]
acols = a.shape[1]
brows = b.shape[0]
bcols = b.shape[1]
# copy numpy values pixel-by-pixel into the larcv images
for r in range(0,arows):
    for c in range(0,acols):
        aI.set_pixel( r, c, a[r,c] )
for r in range(0,brows):
    for c in range(0,bcols):
        bI.set_pixel( r, c, b[r,c] )
# reference result (numpy element-wise product) vs larcv result
C = a*b
CI = aI.eltwise(bI)
# Test-driver chunk: configures/initializes the processors and writes
# NUM_EVENT events with two synthetic input images.  mask, stream1,
# stream2, p, o, cfg3, NUM_EVENT and ERROR_WRITE_INIT come from earlier
# in this file.
mask.configure(cfg3)
stream1.initialize()
stream2.initialize()
p.initialize()
mask.initialize()
if not o.initialize():
    sys.exit(ERROR_WRITE_INIT)
for idx in xrange(NUM_EVENT):
    #we have to make the image from ImageMeta if we are going to
    #use channel status as it checks image2d.plane
    # 10x10 image on plane 0 with origin (0,10)
    meta1 = larcv.ImageMeta(10, 10, 10, 10, 0, 10, 0)
    img1 = larcv.Image2D(meta1)
    # img1: alternating 10/0 by flat pixel index (vertical stripes)
    for x in xrange(img1.as_vector().size()):
        if x % 2 == 0:
            img1.set_pixel(x, 10)
        else:
            img1.set_pixel(x, 0)
    meta2 = larcv.ImageMeta(10, 10, 10, 10, 0, 10, 0)
    img2 = larcv.Image2D(meta2)
    # img2: filled with 10, then every other block of 10 pixels zeroed
    # (Python 2 integer division in x / 10)
    for x in xrange(img2.as_vector().size()):
        img2.set_pixel(x, 10)
        if (x / 10) % 2 == 0:
            img2.set_pixel(x, 0)
    #Input stream 1
    event_image1_tpc = o.get_data(
        larcv.kProductImage2D, "stream1_tpc"
    )
    # combined image is going to steal (std::move) this from me
    event_image1_pmt = o.get_data(
ncrops += 1 #break print "cropped the regions: total ", ncrops # run through the network # ------------------------------------------------- out_v = [] for icrop, crop in enumerate(crop_v): crop_t = torch.from_numpy(crop).to(device) print "run input: ", crop_t.size(), "sum: ", crop_t.detach().sum() out_t = model(crop_t) out_v.append(out_t.detach().cpu().numpy()) # merge the output # -------------------------------------------------- shower_img = larcv.Image2D(wholeview_v.at(0).meta()) track_img = larcv.Image2D(wholeview_v.at(1).meta()) bg_img = larcv.Image2D(wholeview_v.at(2).meta()) shower_img.paint(0) track_img.paint(0) bg_img.paint(0) for out, img2d in zip(out_v, crop_img2d_v): # threshold scores for better compression showerslice = out[:, 1, :, :].reshape(512, 512) trackslice = out[:, 2, :, :].reshape(512, 512) bgslice = out[:, 0, :, :].reshape(512, 512) meta = img2d.meta() # back to image2d
def process_entry(self, entry_num):
    """ perform all actions -- send, receive, process, store -- for entry

    Reads one larcv entry, builds dead-channel label images, splits the
    whole-view ADC and label images into aligned crops with
    UBSplitDetector, sparsifies the crop pairs, ships them to the worker,
    stitches the replies, and saves the output entry.

    entry_num: integer entry index in the input larcv file.
    Returns True on success; raises RuntimeError if the entry cannot be read.
    """
    # get the entries
    ok = self._inlarcv.read_entry(entry_num)
    if not ok:
        raise RuntimeError("could not read larcv entry %d" % (entry_num))
    # get data
    ev_wholeview = self._inlarcv.get_data(larcv.kProductImage2D,
                                          self._adc_producer)
    ev_chstatus = self._inlarcv.get_data(larcv.kProductChStatus,
                                         self._chstatus_producer)
    wholeview_v = ev_wholeview.Image2DArray()
    print("Wholeview meta: ", wholeview_v[0].meta().dump())
    # ev_wholeview_copy = larcv.EventImage2D()
    # blank label images, one per plane, sharing the ADC image metas
    labels = larcv.EventImage2D()
    labels.Append(larcv.Image2D(wholeview_v[0].meta()))
    labels.Append(larcv.Image2D(wholeview_v[1].meta()))
    labels.Append(larcv.Image2D(wholeview_v[2].meta()))
    labels_v = labels.Image2DArray()
    # labels_v = [larcv.Image2D(wholeview_v[0].meta()),larcv.Image2D(wholeview_v[1].meta()),larcv.Image2D(wholeview_v[2].meta())]
    nplanes = wholeview_v.size()
    run = self._inlarcv.event_id().run()
    subrun = self._inlarcv.event_id().subrun()
    event = self._inlarcv.event_id().event()
    print("num of planes in entry {}: ".format((run, subrun, event)), nplanes)
    # crop using UBSplit for infill network
    # we want to break the image into set crops to send in
    # create labels_image_v
    # for img in labels_v:
    #     img.paint(0)
    # mark dead channels in the label images from the channel-status product
    labels_v = ublarcvapp.InfillDataCropper().ChStatusToLabels(
        labels_v, ev_chstatus)
    # we split the entire image using UBSplitDetector
    # config is written to a temp file because CreatePSetFromFile reads a path
    scfg = """Verbosity: 3
InputProducer: \"wire\"
OutputBBox2DProducer: \"detsplit\"
CropInModule: true
OutputCroppedProducer: \"detsplit\"
BBoxPixelHeight: 512
BBoxPixelWidth: 496
CoveredZWidth: 310
FillCroppedYImageCompletely: true
DebugImage: false
MaxImages: -1
RandomizeCrops: 0
MaxRandomAttempts: 4
MinFracPixelsInCrop: -0.0001
TickForward: true
"""
    fcfg = open("ubsplit.cfg", 'w')
    print(scfg, end="", file=fcfg)
    fcfg.close()
    cfg = larcv.CreatePSetFromFile("ubsplit.cfg", "UBSplitDetector")
    algo = ublarcvapp.UBSplitDetector()
    # NOTE(review): initialize() is called before configure(cfg) -- unusual
    # ordering for larcv-style processors; confirm UBSplitDetector allows it.
    algo.initialize()
    algo.configure(cfg)
    algo.set_verbosity(2)
    bbox_list = larcv.EventROI()
    img2du = []
    img2dv = []
    img2dy = []
    img2d_list = []
    # containers filled by the splitter: crop bounding boxes + crop images
    bbox_v = larcv.EventROI().ROIArray()
    img2d_v = larcv.EventImage2D().Image2DArray()
    algo.process(wholeview_v, img2d_v, bbox_v)
    bbox_labels_list = larcv.EventROI()
    img2du_labels = []
    img2dv_labels = []
    img2dy_labels = []
    img2d_labels_list = []
    bbox_labels_v = larcv.EventROI().ROIArray()
    img2d_labels_v = larcv.EventImage2D().Image2DArray()
    # crop the label images with the same splitter so crops line up
    algo.process(labels_v, img2d_labels_v, bbox_labels_v)
    algo.finalize()
    # seperate by planes
    for i in img2d_v:
        p = i.meta().plane()
        if p == 0:
            img2du.append(i)
        elif p == 1:
            img2dv.append(i)
        elif p == 2:
            img2dy.append(i)
    for i in img2d_labels_v:
        p = i.meta().plane()
        if p == 0:
            img2du_labels.append(i)
        elif p == 1:
            img2dv_labels.append(i)
        elif p == 2:
            img2dy_labels.append(i)
    img2d_list.append(img2du)
    img2d_list.append(img2dv)
    img2d_list.append(img2dy)
    img2d_labels_list.append(img2du_labels)
    img2d_labels_list.append(img2dv_labels)
    img2d_labels_list.append(img2dy_labels)
    for plane in img2d_list:
        print("In list", len(plane))
    for plane in img2d_labels_list:
        print("In labels list", len(plane))
    # sparsify image 2d: keep pixels above threshold (10.0) with labels
    thresholds = std.vector("float")(1, 10.0)
    sparseimg_list = []
    usparse_v = []
    vsparse_v = []
    ysparse_v = []
    for a, b in zip(img2d_list, img2d_labels_list):
        for img, label in zip(a, b):
            p = img.meta().plane()
            sparse_img = larcv.SparseImage(img, label, thresholds)
            if (p == 0):
                usparse_v.append(sparse_img)
            elif (p == 1):
                vsparse_v.append(sparse_img)
            elif (p == 2):
                ysparse_v.append(sparse_img)
    sparseimg_list.append(usparse_v)
    sparseimg_list.append(vsparse_v)
    sparseimg_list.append(ysparse_v)
    for plane in sparseimg_list:
        print("In sparse list", len(plane))
    # send messages
    # (send crops to worker to go through network)
    replies = self.send_image_list(sparseimg_list,
                                   run=run,
                                   subrun=subrun,
                                   event=event)
    print("FINISHED SEND STEP")
    self.process_received_images(wholeview_v, ev_chstatus, replies,
                                 img2d_list)
    print("FINISHED PROCESS STEP")
    # tag the output entry with the same (run, subrun, event) and persist it
    self._outlarcv.set_id(self._inlarcv.event_id().run(),
                          self._inlarcv.event_id().subrun(),
                          self._inlarcv.event_id().event())
    self._outlarcv.save_entry()
    print("SAVED ENTRY")
    return True
def process_received_images(self, wholeview_v, ev_chstatus, outimg_v, img2d_list):
    """ receive the list of images from the worker

    Converts the sparse network replies to dense crops, stitches them into
    one whole-view infill image per plane (averaging overlaps), and appends
    the results -- and optionally the input ADC image -- to the output file.

    wholeview_v: vector of whole-view larcv::Image2D, one per plane.
    ev_chstatus: channel-status product passed to the overlay step.
    outimg_v:    per-plane lists of SparseImage network outputs.
    img2d_list:  per-plane lists of the original crop Image2Ds (kept for
                 interface compatibility; the crop meta travels with the
                 sparse image, so it is no longer read here).
    """
    # first change sparse to dense
    outdense_v = []
    denseu = []
    densev = []
    densey = []
    for plane in xrange(len(outimg_v)):
        for img in xrange(len(outimg_v[plane])):
            # FIX: previously a blank Image2D was built from the crop meta
            # and immediately overwritten by this call -- a dead store.
            # as_Image2D() already carries the crop meta.
            predimg = outimg_v[plane][img].as_Image2D()
            if plane == 0:
                denseu.append(predimg)
            elif plane == 1:
                densev.append(predimg)
            elif plane == 2:
                densey.append(predimg)
    outdense_v.append(denseu)
    outdense_v.append(densev)
    outdense_v.append(densey)
    print("size of dense array", len(outdense_v))
    # this is where we stitch the crops together
    nplanes = wholeview_v.size()
    ev_infill = self._outlarcv.\
        get_data(larcv.kProductImage2D, self._infill_tree_name)
    if self._save_adc_image:
        # save a copy of input image
        ev_input = self._outlarcv.\
            get_data(larcv.kProductImage2D, self._adc_producer)
    for p in xrange(nplanes):
        # create final output image
        outputimg = larcv.Image2D(wholeview_v.at(p).meta())
        outputimg.paint(0)
        # temp image counting crop coverage, used for averaging later
        overlapcountimg = larcv.Image2D(wholeview_v.at(p).meta())
        overlapcountimg.paint(0)
        output_meta = outputimg.meta()
        # loop through all crops to stitch onto outputimage
        # (as_Image2D returns a vector; element 0 is the dense crop)
        for iimg in xrange(len(outdense_v[p])):
            ublarcvapp.InfillImageStitcher().Croploop(
                output_meta, outdense_v[p][iimg][0], outputimg,
                overlapcountimg)
        # creates overlay image and takes average where crops overlapped
        ublarcvapp.InfillImageStitcher().Overlayloop(
            p, output_meta, outputimg, overlapcountimg, wholeview_v,
            ev_chstatus)
        ev_infill.Append(outputimg)
        if self._save_adc_image:
            ev_input.Append(wholeview_v.at(p))
# Mid-script chunk: fetches ADC and channel-status products, optionally
# loads larflow truth, optionally zeroes ADC pixels tagged by the wirecell
# through-going-muon image, then builds bad/gap channel images.
# io, args, badchmaker, ADC_PRODUCER and CHSTATUS_PRODUCER come from
# enclosing scope; the final for-loop body continues past this chunk.
adc_v = io.get_data(larcv.kProductImage2D, ADC_PRODUCER).Image2DArray()
ev_badch = io.get_data(larcv.kProductChStatus, CHSTATUS_PRODUCER)
if args.has_mc:
    print("Retrieving larflow truth...")
    ev_larflow = io.get_data(larcv.kProductImage2D, "larflow")
    flow_v = ev_larflow.Image2DArray()
if args.has_wirecell:
    # make wirecell masked image
    print("making wirecell masked image")
    start_wcmask = time.time()
    ev_wcthrumu = io.get_data(larcv.kProductImage2D, "thrumu")
    ev_wcwire = io.get_data(larcv.kProductImage2D, "wirewc")
    for p in xrange(adc_v.size()):
        adc = larcv.Image2D(adc_v[p])  # a copy
        np_adc = larcv.as_ndarray(adc)
        np_wc = larcv.as_ndarray(ev_wcthrumu.Image2DArray()[p])
        # zero every ADC pixel tagged as through-going muon by wirecell
        np_adc[np_wc > 0.0] = 0.0
        masked = larcv.as_image2d_meta(np_adc, adc.meta())
        ev_wcwire.Append(masked)
    # downstream code now uses the masked images in place of the raw ADC
    adc_v = ev_wcwire.Image2DArray()
    end_wcmask = time.time()
    print("time to mask: ", end_wcmask - start_wcmask, " secs")
t_badch = time.time()
# build bad-channel images; the numeric arguments are detector geometry
# constants -- presumably (planes, ..., 2400 ticks, rows, 3456 wires, ...);
# TODO confirm against makeBadChImage's signature
badch_v = badchmaker.makeBadChImage(4, 3, 2400, 6 * 1008, 3456, 6, 1,
                                    ev_badch)
print("Number of badcv images: ", badch_v.size())
gapch_v = badchmaker.findMissingBadChs(adc_v, badch_v, 10.0, 100)
for p in xrange(badch_v.size()):
def process_received_images(self, wholeview_v, replies_vv):
    """ receive the list of images from the worker

    we create a track and shower image.
    we also save the raw 5-class sparse image as well:
    HIP MIP SHOWER DELTA MICHEL

    wholeview_v: vector of whole-view larcv::Image2D, one per plane.
    replies_vv:  per-plane lists of SparseImage network outputs whose
                 pixellist is flat (row, col, hip, mip, shower, delta,
                 michel) records.
    """
    nplanes = wholeview_v.size()

    # save the sparse image data
    for p in xrange(nplanes):
        ev_sparse_output = self._outlarcv.get_data(
            larcv.kProductSparseImage,
            "{}_plane{}".format(self._sparseout_tree_name, p))
        replies_v = replies_vv[p]
        for reply in replies_v:
            ev_sparse_output.Append(reply)
        self._log.info("Saving {} sparse images for plane {}".format(
            ev_sparse_output.SparseImageArray().size(), p))

    # make the track/shower images
    ev_pred = self._outlarcv.get_data(larcv.kProductImage2D,
                                      "sparseuresnet_prediction")
    # in order to seemlessly work with vertexer
    ev_uburn = [
        self._outlarcv.get_data(larcv.kProductImage2D,
                                "uburn_plane{}".format(p))
        for p in xrange(3)
    ]

    # blank whole-view prediction canvases, one per plane
    pred_v = std.vector("larcv::Image2D")()
    for p in xrange(wholeview_v.size()):
        img = wholeview_v.at(p)
        predimg = larcv.Image2D(img.meta())
        predimg.paint(0.0)
        pred_v.push_back(predimg)

    for p in xrange(wholeview_v.size()):
        predimg = pred_v.at(p)
        wholemeta = wholeview_v.at(p).meta()
        uburn_track = larcv.Image2D(wholeview_v.at(p).meta())
        uburn_shower = larcv.Image2D(wholeview_v.at(p).meta())
        uburn_track.paint(0.0)
        uburn_shower.paint(0.0)
        for sparseout in replies_vv[p]:
            # each record is (row, col) + nfeatures class scores
            npts = int(sparseout.pixellist().size() /
                       (sparseout.nfeatures() + 2))
            stride = int(sparseout.nfeatures() + 2)
            sparse_meta = sparseout.meta(0)
            for ipt in xrange(npts):
                row = int(sparseout.pixellist().at(stride * ipt + 0))
                col = int(sparseout.pixellist().at(stride * ipt + 1))
                hip = sparseout.pixellist().at(stride * ipt + 2)
                mip = sparseout.pixellist().at(stride * ipt + 3)
                shr = sparseout.pixellist().at(stride * ipt + 4)
                dlt = sparseout.pixellist().at(stride * ipt + 5)
                mic = sparseout.pixellist().at(stride * ipt + 6)
                # collapse 5 classes into track (hip+mip) and
                # shower (shower+delta+michel) scores
                totshr = shr + dlt + mic
                tottrk = hip + mip
                # arg-max over (hip, mip, totshr); ties/all-zero fall
                # through to maxarg=0, i.e. "hip"
                maxscore = 0.
                maxarg = 0
                for i, val in enumerate([hip, mip, totshr]):
                    if val > maxscore:
                        maxarg = i
                        maxscore = val
                if maxarg == 1:
                    pred = 2  # track
                elif maxarg == 2:
                    pred = 3  # shower
                else:
                    pred = 1  # hip
                # translate to different meta
                try:
                    xrow = wholemeta.row(sparse_meta.pos_y(row))
                    xcol = wholemeta.col(sparse_meta.pos_x(col))
                    predimg.set_pixel(xrow, xcol, pred)
                    uburn_track.set_pixel(xrow, xcol, tottrk)
                    uburn_shower.set_pixel(xrow, xcol, totshr)
                # FIX: was a bare "except:", which also swallows
                # KeyboardInterrupt/SystemExit; only out-of-range meta
                # lookups should be skipped here.
                except Exception:
                    self._log.info(
                        "error assigning {} -- {} to wholeview. meta={}".
                        format((col, row),
                               (sparse_meta.pos_x(col),
                                sparse_meta.pos_y(row)),
                               sparse_meta.dump()))
        ev_uburn[p].Append(uburn_shower)
        ev_uburn[p].Append(uburn_track)
        ev_pred.Append(predimg)
def main():
    """Run the sparse-infill model over a test file, accumulate accuracy
    metrics for dead-channel pixels, write dense output images to a larcv
    file, and save diagnostic histograms as PNGs.

    Relies on module-level configuration: GPUMODE, DEVICE_IDS, batchsize,
    nworkers, plane, tickbackward, readonly_products, image_height,
    image_width, reps, ninput_features, noutput_features, nplanes,
    CHECKPOINT_MAP_LOCATIONS, nentries, and helpers
    load_infill_larcvdata / SparseInfill / pixelloop.
    """
    inputfiles = ["/mnt/disk1/nutufts/kmason/data/sparseinfill_data_test.root"]
    # NOTE(review): outputfile is never read -- the output name is
    # hard-coded again below in set_out_file().
    outputfile = ["sparseoutput.root"]
    CHECKPOINT_FILE = "../training/vplane_24000.tar"
    # diagnostic histograms
    # NOTE(review): both TH2Fs are named 'h2'; ROOT warns on duplicate
    # names -- consider distinct names.
    trueadc_h = TH1F('adc value', 'adc value', 100, 0., 100.)
    predictadc_h = TH1F('adc value', 'adc value', 100, 0., 100.)
    diffs2d_thresh_h = TH2F('h2', 'diff2d', 90, 10., 100., 90, 10., 100.)
    diff2d_h = TH2F('h2', 'diff2d', 100, 0., 100., 100, 0., 100.)
    if GPUMODE:
        DEVICE = torch.device("cuda:%d" % (DEVICE_IDS[0]))
    else:
        DEVICE = torch.device("cpu")
    iotest = load_infill_larcvdata("infillsparsetest", inputfiles, batchsize,
                                   nworkers, "ADCMasked", "ADC", plane,
                                   tickbackward=tickbackward,
                                   readonly_products=readonly_products)
    # separate IOManager just to read per-entry metas / (run,subrun,event)
    inputmeta = larcv.IOManager(larcv.IOManager.kREAD)
    inputmeta.add_in_file(inputfiles[0])
    inputmeta.initialize()
    # setup model
    model = SparseInfill((image_height, image_width), reps, ninput_features,
                         noutput_features, nplanes, show_sizes=False).to(DEVICE)
    # load checkpoint data
    checkpoint = torch.load(
        CHECKPOINT_FILE,
        map_location=CHECKPOINT_MAP_LOCATIONS)  # load weights to gpuid
    # NOTE(review): best_prec1 is loaded but never used.
    best_prec1 = checkpoint["best_prec1"]
    model.load_state_dict(checkpoint["state_dict"])
    tstart = time.time()
    # values for average accuracies
    totacc2 = 0
    totacc5 = 0
    totacc10 = 0
    totacc20 = 0
    totalbinacc = 0
    # output IOManager
    outputdata = larcv.IOManager(larcv.IOManager.kWRITE, "IOManager",
                                 larcv.IOManager.kTickForward)
    outputdata.set_out_file("sparseoutput.root")
    outputdata.initialize()
    # save to output file
    ev_out_ADC = outputdata.get_data(larcv.kProductImage2D, "ADC")
    ev_out_input = outputdata.get_data(larcv.kProductImage2D, "Input")
    ev_out_output = outputdata.get_data(larcv.kProductImage2D, "Output")
    ev_out_overlay = outputdata.get_data(larcv.kProductImage2D, "Overlay")
    totaltime = 0
    for n in xrange(nentries):
        starttime = time.time()
        print "On entry: ", n
        inputmeta.read_entry(n)
        ev_meta = inputmeta.get_data(larcv.kProductSparseImage, "ADC")
        outmeta = ev_meta.SparseImageArray()[0].meta_v()
        model.eval()
        infilldict = iotest.get_tensor_batch(DEVICE)
        coord_t = infilldict["coord"]
        input_t = infilldict["ADCMasked"]
        true_t = infilldict["ADC"]
        # run through model
        predict_t = model(coord_t, input_t, 1)
        forwardpasstime = time.time()
        # NOTE(review): these three .numpy() results are discarded -- the
        # calls are no-ops as written; either bind the results or drop them.
        predict_t.detach().cpu().numpy()
        input_t.detach().cpu().numpy()
        true_t.detach().cpu().numpy()
        # calculate accuracies
        # labels: 1 where the input pixel was dead (masked to 0)
        labels = input_t.eq(0).float()
        # chargelabel: dead pixels that should have had charge
        chargelabel = labels * (true_t > 0).float()
        totaldeadcharge = chargelabel.sum().float()
        totaldead = labels.sum().float()
        predictdead = labels * predict_t
        truedead = true_t * labels
        predictcharge = chargelabel * predict_t
        truecharge = chargelabel * true_t
        err = (predictcharge - truecharge).abs()
        # fraction of dead-with-charge pixels predicted within N ADC of truth
        totacc2 += (err.lt(2).float() *
                    chargelabel.float()).sum().item() / totaldeadcharge
        totacc5 += (err.lt(5).float() *
                    chargelabel.float()).sum().item() / totaldeadcharge
        totacc10 += (err.lt(10).float() *
                     chargelabel.float()).sum().item() / totaldeadcharge
        totacc20 += (err.lt(20).float() *
                     chargelabel.float()).sum().item() / totaldeadcharge
        # binary (zero / nonzero) agreement in the dead region
        bineq0 = (truedead.eq(0).float() * predictdead.eq(0).float() *
                  labels).sum().item()
        bingt0 = (truedead.gt(0).float() *
                  predictdead.gt(0).float()).sum().item()
        totalbinacc += (bineq0 + bingt0) / totaldead
        # construct dense images
        ADC_img = larcv.Image2D(image_width, image_height)
        Input_img = larcv.Image2D(image_width, image_height)
        Output_img = larcv.Image2D(image_width, image_height)
        Overlay_img = larcv.Image2D(image_width, image_height)
        ADC_img, Input_img, Output_img, Overlay_img, trueadc_h, predictadc_h, diff2d_h, diffs2d_thresh_h = pixelloop(
            true_t, coord_t, predict_t, input_t, ADC_img, Input_img,
            Output_img, Overlay_img, trueadc_h, predictadc_h, diff2d_h,
            diffs2d_thresh_h)
        ev_out_ADC.Append(ADC_img)
        ev_out_input.Append(Input_img)
        ev_out_output.Append(Output_img)
        ev_out_overlay.Append(Overlay_img)
        outputdata.set_id(ev_meta.run(), ev_meta.subrun(), ev_meta.event())
        outputdata.save_entry()
        endentrytime = time.time()
        print "total entry time: ", endentrytime - starttime
        print "forward pass time: ", forwardpasstime - starttime
        totaltime += forwardpasstime - starttime
    # averages over all entries, as percentages (tensors: .item() below)
    avgacc2 = (totacc2 / nentries) * 100
    avgacc5 = (totacc5 / nentries) * 100
    avgacc10 = (totacc10 / nentries) * 100
    avgacc20 = (totacc20 / nentries) * 100
    avgbin = (totalbinacc / nentries) * 100
    tend = time.time() - tstart
    print "elapsed time, ", tend, "secs ", tend / float(nentries), " sec/batch"
    print "average forward pass time: ", totaltime / nentries
    print "--------------------------------------------------------------------"
    print " For dead pixels that should have charge..."
    print "<2 ADC of true: ", avgacc2.item(), "%"
    print "<5 ADC of true: ", avgacc5.item(), "%"
    print "<10 ADC of true: ", avgacc10.item(), "%"
    print "<20 ADC of true: ", avgacc20.item(), "%"
    print "binary acc in dead: ", avgbin.item(), "%"
    print "--------------------------------------------------------------------"
    # create canvas to save as pngs
    # ADC values
    rt.gStyle.SetOptStat(0)
    c1 = TCanvas("ADC Values", "ADC Values", 600, 600)
    trueadc_h.GetXaxis().SetTitle("ADC Value")
    trueadc_h.GetYaxis().SetTitle("Number of pixels")
    c1.UseCurrentStyle()
    trueadc_h.SetLineColor(632)
    predictadc_h.SetLineColor(600)
    c1.SetLogy()
    trueadc_h.Draw()
    predictadc_h.Draw("SAME")
    legend = TLegend(0.1, 0.7, 0.48, 0.9)
    legend.AddEntry(trueadc_h, "True Image", "l")
    legend.AddEntry(predictadc_h, "Output Image", "l")
    legend.Draw()
    c1.SaveAs(("ADCValues.png"))
    # 2d ADC difference histogram
    c2 = TCanvas("diffs2D", "diffs2D", 600, 600)
    c2.UseCurrentStyle()
    line = TLine(0, 0, 80, 80)  # y = x reference line
    line.SetLineColor(632)
    diff2d_h.SetOption("COLZ")
    c2.SetLogz()
    diff2d_h.GetXaxis().SetTitle("True ADC value")
    diff2d_h.GetYaxis().SetTitle("Predicted ADC value")
    diff2d_h.Draw()
    line.Draw()
    c2.SaveAs(("diffs2d.png"))
    # 2d ADC difference histogram - thresholded
    c3 = TCanvas("diffs2D_thresh", "diffs2D_thresh", 600, 600)
    c3.UseCurrentStyle()
    line = TLine(10, 10, 80, 80)
    line.SetLineColor(632)
    diffs2d_thresh_h.SetOption("COLZ")
    diffs2d_thresh_h.GetXaxis().SetTitle("True ADC value")
    diffs2d_thresh_h.GetYaxis().SetTitle("Predicted ADC value")
    diffs2d_thresh_h.Draw()
    line.Draw()
    c3.SaveAs(("diffs2d_thresh.png"))
    # save results
    outputdata.finalize()