Example #1
    def process_reply(self, frames):
        parts = len(frames)

        # for each batch of images, we save an entry.
        # to handle multiple batches we need a batch-to-eventid map:
        # when the eventid changes, we save the entry

        for i in range(0, parts, 3):
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_enc = frames[i + 2]

            meta = decode_larcv1_metamsg(metamsg)
            arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            print "SimpleLArCV1Client[{}] received array name=\"{}\" shape={} meta={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip())
            nbatches = arr.shape[0]

            for ib in range(nbatches):

                rse = self.batch2rse[ib]
                if self.current_rse is not None and rse != self.current_rse:
                    # save output entry
                    self.io_out.set_id(rse[0], rse[1], rse[2])
                    print "SimpleLArCV1Client[{}] saving entry {}".format(
                        self._identity, rse)
                    self.io_out.save_entry()
                    self.io_out.clear_entry()

                self.current_rse = rse

                output_ev_container = self.io_out.get_data(
                    larcv.kProductImage2D, str(name))
                for ich in range(arr.shape[1]):
                    meta = decode_larcv1_metamsg(metamsg, ich)
                    img = larcv.as_image2d_meta(
                        np.transpose(arr[ib, ich, :], (1, 0)), meta)
                    output_ev_container.Append(img)

        # save output entry
        self.io_out.set_id(rse[0], rse[1], rse[2])
        self.io_out.save_entry()
        self.io_out.clear_entry()
        self.current_rse = rse
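
The unpack call above leans on msgpack plus the msgpack-numpy hooks (m.encode/m.decode). A minimal round-trip sketch of that encoding, assuming both packages are installed:

import numpy as np
import msgpack
import msgpack_numpy as m

arr = np.arange(12, dtype=np.float32).reshape(1, 3, 4)
buf = msgpack.packb(arr, default=m.encode)         # numpy -> bytes
back = msgpack.unpackb(buf, object_hook=m.decode)  # bytes -> numpy
assert (back == arr).all() and back.dtype == arr.dtype
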
Example #2
def decode_larcv2_evimage2d(io, producername, imgdata_np, imgmeta_np):

    # make evcontainer
    evout = io.get_data("image2d", producername)

    # make meta
    nimgs = imgdata_np.shape[1]
    for i in xrange(nimgs):
        nrows = int(imgmeta_np[0, i, 0, 1])
        ncols = int(imgmeta_np[0, i, 0, 0])
        planeid = int(imgmeta_np[0, i, 0, 6])

        lcvmeta = larcv.ImageMeta(imgmeta_np[0, i, 0, 2],
                                  imgmeta_np[0, i, 0, 3],
                                  imgmeta_np[0, i, 0, 4],
                                  imgmeta_np[0, i, 0, 5],
                                  nrows, ncols, planeid)
        # convert image
        outarr = np.flip(imgdata_np[0, i, :, :].transpose((1, 0)), 0)
        lcvimg = larcv.as_image2d_meta(outarr, lcvmeta)
        evout.append(lcvimg)

    return
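
A hypothetical call site for the helper above. The IOManager setup is an assumption (not from the source); the array shapes follow the indexing inside the function: imgdata_np is (batch, nimgs, rows, cols) and imgmeta_np is (batch, nimgs, 1, nfields).

# hypothetical usage sketch; the io setup here is assumed, not from the source
io = larcv.IOManager(larcv.IOManager.kWRITE)
io.set_out_file("imgs_out.root")
io.initialize()

decode_larcv2_evimage2d(io, "wire", imgdata_np, imgmeta_np)
io.set_id(run, subrun, event)  # run/subrun/event assumed known to the caller
io.save_entry()
io.finalize()
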
Example #3
    for row in xrange(src_np.shape[1]):
        flow_np[:, row] += colidx[:]
    flow_np[src_np < 10] = 0
    flow_np[flow_np < 0] = 0
    flow_np[flow_np >= src_np.shape[0]] = 0

    flowed_np = np.zeros(src_np.shape, dtype=np.float32)
    pixlist = np.argwhere(flow_np != 0)
    print("pixlist: ", pixlist.shape)
    for idx in xrange(pixlist.shape[0]):
        row = pixlist[idx, 1]
        src_col = pixlist[idx, 0]
        tar_col = int(flow_np[src_col, row])
        #print(row,src_col,"to",tar_col)
        flowed_np[tar_col, row] = src_np[src_col, row]
    flowed_lcv = larcv.as_image2d_meta(flowed_np, tarimg.meta())
    hflowed = larcv.as_th2d(flowed_lcv, "hflowed_{}".format(iset))
    hflowed.GetZaxis().SetRangeUser(0, 100)
    hflowed.SetTitle("Flowed Pixels from {};wire;tick".format(flowdir))

    c.cd(1)
    hsrc.Draw("COLZ")

    c.cd(2)
    hflowed.Draw("COLZ")

    c.cd(3)
    htar.Draw("COLZ")

    c.Update()
    c.Draw()
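
The per-pixel remap loop above can also be written with numpy fancy indexing. A sketch under the same conventions (flow_np holds integer target columns, 0 meaning no flow); when two sources flow to the same target pixel, one write wins, as in the loop:

# vectorized alternative to the pixel loop above (same semantics assumed)
rows = pixlist[:, 1]
src_cols = pixlist[:, 0]
tar_cols = flow_np[src_cols, rows].astype(np.int64)
flowed_np[tar_cols, rows] = src_np[src_cols, rows]
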
Example #4
        # map the network's log scores to probabilities in [0,1]
        labels_np = 10**labels_np

        for ib in range(batch_size):
            if ientry >= nevts:
                # skip last portion of last batch
                break

            inputmeta.read_entry(ientry)
            ev_meta = inputmeta.get_data("image2d", "wire")
            outmeta = ev_meta.image2d_array()[2].meta()

            # save output of network as images
            img_slice0 = labels_np[ib, 0, :, :]
            if SAVE_NOFILL:
                nofill_lcv = larcv.as_image2d_meta(img_slice0, outmeta)
                ev_nofill_out = outputdata.get_data("image2d", "nofill")
                ev_nofill_out.append(nofill_lcv)

            img_slice1 = labels_np[ib, 1, :, :]
            if SAVE_FILL:
                fill_lcv = larcv.as_image2d_meta(img_slice1, outmeta)
                ev_fill_out = outputdata.get_data("image2d", "fill")
                ev_fill_out.append(fill_lcv)

            #save inputs to network for reference
            wire_slice = wire_t[ib, 0, :, :]
            wire_out = larcv.as_image2d_meta(wire_slice, outmeta)
            ev_out_wire.append(wire_out)

            weight_slice = weight_t[ib, 0, :, :]
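
The 10**labels_np step assumes the network emitted base-10 log scores; if the final layer were a natural-log log_softmax, np.exp would be the right inverse. A minimal sketch of both:

import numpy as np

log_scores = np.array([-0.301, -1.0], dtype=np.float32)
probs_b10 = 10.0**log_scores   # base-10 logs -> approx [0.5, 0.1]
probs_e = np.exp(log_scores)   # natural logs (e.g. from log_softmax)
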
Example #5
                # setting uninteresting pixels to zero is important for good file size
                thresh_slice = (source_np[ib, 0, :, :] < threshold)
                #adcgoodch_slice = source_np[ib,0,:,status_batch[ib]==4].transpose((1,0))

                # SKIP CHSTATUS MASK FOR NOW
                #nmasked = 0
                #for n,goodch in enumerate( np.nditer( status_batch[ib] ) ):
                #    if goodch==4:
                #        nmasked += (flow_slice[:,n]<threshold).sum()
                #        flow_slice[:,n][ source_np[ib,0,:,n]<threshold ] = 0

                # zero regions in good channel list and below threshold
                #print "nmasked estimate: ",nmasked
                #print "flow_slice non-zero (post-mask): ",(flow_slice!=0).sum()

                flow_lcv = larcv.as_image2d_meta(
                    np.transpose(flow_slice, (1, 0)), image_meta[ib])
                if stitch:
                    # load cropped info into stitcher
                    # -------------------------------
                    stitcher.insertFlowSubimage(flow_lcv, target_meta[ib])
                else:
                    # save cropped images
                    #--------------------
                    # prediction images
                    evoutpred = outputdata.get_data(
                        larcv.kProductImage2D,
                        "larflow_%s" % (FLOWDIR.lower()))
                    evoutpred.Append(flow_lcv)

                    # input cropped source and target image
                    if save_cropped_adc:
Example #6
            # -------------
            # Store results
            tcopy = time.time()
            for ib in xrange(min(batch_size, len(image_meta[0]))):
                isgood = True
                for p in xrange(3):
                    if image_meta[p][ib] is None:
                        isgood = False
                if not isgood:
                    continue

                # convert data to larcv
                ssnet_lcv = {}
                ssnet_lcv["track"] = [
                    larcv.as_image2d_meta(result_np[p][ib, 1, :],
                                          image_meta[p][ib]) for p in xrange(3)
                ]
                ssnet_lcv["shower"] = [
                    larcv.as_image2d_meta(result_np[p][ib, 2, :],
                                          image_meta[p][ib]) for p in xrange(3)
                ]
                ssnet_lcv["endpt"] = [
                    larcv.as_image2d_meta(result_np[p][ib, 3, :],
                                          image_meta[p][ib]) for p in xrange(3)
                ]

                # if stitching, store into stitcher
                if stitch:
                    for p in xrange(3):
                        outmeta = out_v[p].meta()  # stitch meta
                        stitcher_track.insertFlowSubimage(
Example #7
    def process_precropped_reply(self, frames):

        # message parts consist of ssnet output
        # one part contains a batch for one plane
        # we must collect data for all three planes of an event before writing to disk

        # by construction, one batch is for one event
        # one message contains one batch
        # this makes it a lot easier to understand
        # someone smarter can make general code

        treply = time.time()

        plane_img_v_dict = {}

        parts = len(frames)
        for i in range(0, parts, 3):
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_comp = frames[i + 2]
            x_enc = zlib.decompress(x_comp)

            if name not in plane_img_v_dict:
                plane_img_v_dict[name] = [
                    std.vector("larcv::Image2D")() for x in range(self.NPLANES)
                ]

            meta = decode_larcv1_metamsg(metamsg)
            arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            nbatches = arr.shape[0]
            print "CaffeLArCV1Client[{}] received array name=\"{}\" shape={} meta={} batchsize={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip(), nbatches)

            for ib in range(nbatches):
                # set the RSE
                rse = self.batch2rse[ib]
                self.current_rse = rse

                #img = larcv.as_image2d_meta( np.transpose( arr[ib,0,:], (1,0) ), meta ) # not needed?
                img = larcv.as_image2d_meta(arr[ib, 0, :], meta)
                print "fill ", name, " meta=", meta.dump().strip()
                plane_img_v_dict[name][meta.plane()].push_back(img)

        # make output event containers
        print "CaffeLArCV1Client[{}] storing images".format(self._identity)
        for name, plane_img_v in plane_img_v_dict.items():
            print "name: ", name, plane_img_v
            for img_v in plane_img_v:
                print img_v
                print img_v.size()
                if img_v.size() == 0:
                    # no images for this plane in the reply
                    continue
                planeid = img_v.front().meta().plane()
                print "CaffeLArCV1Client[{}] storing name={} plane={}".format(
                    self._identity, name, planeid)
                outname = "%s_plane%d" % (name, planeid)
                print "Filling event container: ", outname
                output_ev_container = self.io_out.get_data(
                    larcv.kProductImage2D, outname)
                for iimg in range(img_v.size()):
                    output_ev_container.Append(img_v[iimg])

        # save output entry
        self.io_out.set_id(rse[0], rse[1], rse[2])
        self.io_out.save_entry()
        self.io_out.clear_entry()
        self.current_rse = rse

        treply = time.time() - treply
        self._ttracker["savereply::total"] += treply
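
Examples #1 and #7 share the same save-on-event-change bookkeeping. Distilled into a helper, it looks like this sketch (io_out is assumed to expose the larcv-style set_id/save_entry/clear_entry interface used above):

def flush_if_new_rse(io_out, current_rse, rse):
    # sketch of the pattern above, not code from the source
    if current_rse is not None and rse != current_rse:
        io_out.set_id(rse[0], rse[1], rse[2])
        io_out.save_entry()
        io_out.clear_entry()
    return rse
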
Example #8
for out, img2d in zip(out_v, crop_img2d_v):

    # threshold scores for better compression
    showerslice = out[:, 1, :, :].reshape(512, 512)
    trackslice = out[:, 2, :, :].reshape(512, 512)
    bgslice = out[:, 0, :, :].reshape(512, 512)

    meta = img2d.meta()
    # back to image2d
    shrmeta = larcv.ImageMeta(meta.width(), meta.height(), meta.rows(),
                              meta.cols(), meta.min_x(), meta.min_y(), 0)
    trkmeta = larcv.ImageMeta(meta.width(), meta.height(), meta.rows(),
                              meta.cols(), meta.min_x(), meta.min_y(), 1)
    bgmeta = larcv.ImageMeta(meta.width(), meta.height(), meta.rows(),
                             meta.cols(), meta.min_x(), meta.min_y(), 2)
    showercrop = larcv.as_image2d_meta(showerslice, shrmeta)
    shower_img.overlay(showercrop, larcv.Image2D.kOverWrite)
    trackcrop = larcv.as_image2d_meta(trackslice, trkmeta)
    track_img.overlay(trackcrop, larcv.Image2D.kOverWrite)
    bgcrop = larcv.as_image2d_meta(bgslice, bgmeta)
    bg_img.overlay(bgcrop, larcv.Image2D.kOverWrite)

h2d = larcv.as_th2d(shower_img, "shower_img")
c.Clear()
h2d.Draw("colz")
c.Update()
print "enter to go to next image"
raw_input()

ev_ssnetout = outio.get_data(larcv.kProductImage2D, "ssnet_plane2")
ev_ssnetout.Append(shower_img)
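
Condensed, the crop-overlay pattern of this example is the following sketch; full_meta (describing the whole plane) and the crops list are assumptions:

# sketch: stitch 512x512 score crops into a full-plane image via overlay
shower_img = larcv.Image2D(full_meta)  # blank full-plane image
shower_img.paint(0.0)                  # clear pixel values
for crop in crops:
    shower_img.overlay(crop, larcv.Image2D.kOverWrite)
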
Example #9
        for ib in range(batch_size):
            if ientry >= nevts:
                # skip last portion of last batch
                break
            # evtinfo   = data["event_ids"][ib] #Change event_ids to just id?
            #outmeta   = data["source_test"][ib].meta()
            inputmeta.read_entry(ientry)
            ev_meta = inputmeta.get_data("image2d", "adc")
            # if ev_meta.run()!=evtinfo.run() or ev_meta.subrun()!=evtinfo.subrun() or ev_meta.event()!=evtinfo.event():
            # raise RuntimeError("(run,subrun,event) for evtinfo and ev_meta do not match!")

            outmeta = ev_meta.image2d_array()[2].meta()

            img_slice0 = labels_np[ib, 0, :, :]
            background_lcv = larcv.as_image2d_meta(img_slice0, outmeta)
            ev_out = outputdata.get_data("image2d", "background")
            ev_out.append(background_lcv)

            img_slice1 = labels_np[ib, 1, :, :]
            track_lcv = larcv.as_image2d_meta(img_slice1, outmeta)
            ev_out = outputdata.get_data("image2d", "track")
            ev_out.append(track_lcv)

            img_slice2 = labels_np[ib, 2, :, :]
            shower_lcv = larcv.as_image2d_meta(img_slice2, outmeta)
            ev_out = outputdata.get_data("image2d", "shower")
            ev_out.append(shower_lcv)

            img_slice3 = labels_np[ib, 3, :, :]
            track_end_lcv = larcv.as_image2d_meta(img_slice3, outmeta)
                    "imgdump/img_%d_%d_%d_target.png" % (ientry, iset + 1, ib),
                    crop_np[ib, target_plane, :, :])

                # store region of image
                #image_meta.append(  larcv.ImageMeta( bb_v[2], 512, 832 ) )
                #target_meta.append( larcv.ImageMeta( bb_v[target_plane], 512, 832 ) )
                image_meta.append(bb_v[2])
                target_meta.append(bb_v[target_plane])
                adc_metas.append(bb_v)

                # if not stitching, save crops
                if not stitch:
                    flowcrops = {"flow": [], "visi": [], "adc": []}
                    if save_cropped_adc:
                        for p in xrange(0, 3):
                            adc_lcv = larcv.as_image2d_meta(
                                crop_np[ib, p, :].transpose((1, 0)), bb_v[p])
                            flowcrops["adc"].append(adc_lcv)
                        #if target_plane==0:
                        #    adc_lcv0 = larcv.as_image2d_meta( target_np[ib,0,:].transpose((1,0)), bb_v[0] )
                        #    flowcrops["adc"].append( adc_lcv0  )
                        #    adc_lcv1 = larcv.as_image2d_meta( np.flip(img_np[1,0,bounds[1][0]:bounds[1][2],bounds[1][1]:bounds[1][3]],0).transpose((1,0)), bb_v[1] )
                        #    flowcrops["adc"].append( adc_lcv1  )
                        #elif target_plane==1:
                        #    adc_lcv0 = larcv.as_image2d_meta( np.flip(img_np[0,0,bounds[0][0]:bounds[0][2],bounds[0][1]:bounds[0][3]],0).transpose((1,0)), bb_v[0] )
                        #    flowcrops["adc"].append( adc_lcv0  )
                        #    adc_lcv1 = larcv.as_image2d_meta( target_np[ib,0,:].transpose((1,0)), bb_v[1] )
                        #    flowcrops["adc"].append( adc_lcv1  )
                        #adc_lcv2 = larcv.as_image2d_meta( source_np[ib,0,:].transpose((1,0)), bb_v[2] )
                        #flowcrops["adc"].append( adc_lcv2  )

                    if ismc:
Example #11
        t_forward = time.time()
        output_np = [
            predict_fn({"uplane": input_np[p]})["pred"] for p in range(NPLANES)
        ]
        timer["forward"] += time.time() - t_forward

        # write to disk
        t_disk = time.time()
        event_ssnet_containers = [
            out.get_data(larcv.kProductImage2D, "ssnet_plane%d" % (p))
            for p in range(NPLANES)
        ]
        for p in range(NPLANES):
            for c in range(NCLASSES):
                event_ssnet_containers[p].Append(
                    larcv.as_image2d_meta(output_np[p][0, :, :, c],
                                          img_v[p].meta()))
        out.set_id(event_image_container.run(), event_image_container.subrun(),
                   event_image_container.event())
        out.save_entry()
        timer["writeout"] += time.time() - t_disk

        timer["totentry"] += time.time() - t_entry

    print "End of entry loop"
    print "Finalize output"
    out.finalize()
    timer["total"] = time.time() - timer["total"]

    print "Timing for different steps"
    print "--------------------------"
    for k, i in timer.items():
Example #12
            trun = time.time()
            pred_flow, pred_visi = model.forward( source_t, target_t )
            trun = time.time()-trun
            timing["+++run_model"] += trun
            if verbose:
                print "time to run model: ",trun," secs"            

            # turn pred_flow back into larcv
            tcopy = time.time()
            flow_np = pred_flow.detach().cpu().numpy().astype(np.float32)
            outmeta = out_v[0].meta()
            for ib in range(min(batch_size,len(image_meta))):
                if image_meta[ib] is None:
                    continue
                img_slice = flow_np[ib,0,:]
                flow_lcv = larcv.as_image2d_meta( img_slice, image_meta[ib] )
                stitcher.insertFlowSubimage( flow_lcv, target_meta[ib] ) 
            tcopy = time.time()-tcopy
            timing["+++copy_to_output"] += tcopy
            if verbose:
                print "time to copy results back into full image: ",tcopy," secs"


        # end of while loop
        if verbose:
            print "Processed all the images"

        tout = time.time()
        outputdata.read_entry(ientry)
        stitcher.process( outputdata )
        outputdata.save_entry()
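
Condensing Example #12, the stitch-then-save flow is: insert each cropped prediction into the stitcher, then let the stitcher assemble the event image before saving. A sketch using the same calls:

# condensed stitch-then-save pattern (a sketch of the calls used above)
for ib, meta in enumerate(image_meta):
    if meta is None:
        continue
    flow_lcv = larcv.as_image2d_meta(flow_np[ib, 0, :], meta)
    stitcher.insertFlowSubimage(flow_lcv, target_meta[ib])
outputdata.read_entry(ientry)
stitcher.process(outputdata)  # merge the subimages into the full image
outputdata.save_entry()
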
Example #13
        print("Retrieving larflow truth...")
        ev_larflow = io.get_data(larcv.kProductImage2D, "larflow")
        flow_v = ev_larflow.Image2DArray()

    if args.has_wirecell:
        # make wirecell masked image
        print("making wirecell masked image")
        start_wcmask = time.time()
        ev_wcthrumu = io.get_data(larcv.kProductImage2D, "thrumu")
        ev_wcwire = io.get_data(larcv.kProductImage2D, "wirewc")
        for p in xrange(adc_v.size()):
            adc = larcv.Image2D(adc_v[p])  # a copy
            np_adc = larcv.as_ndarray(adc)
            np_wc = larcv.as_ndarray(ev_wcthrumu.Image2DArray()[p])
            np_adc[np_wc > 0.0] = 0.0
            masked = larcv.as_image2d_meta(np_adc, adc.meta())
            ev_wcwire.Append(masked)
        adc_v = ev_wcwire.Image2DArray()
        end_wcmask = time.time()
        print("time to mask: ", end_wcmask - start_wcmask, " secs")

    t_badch = time.time()
    badch_v = badchmaker.makeBadChImage(4, 3, 2400, 6 * 1008, 3456, 6, 1,
                                        ev_badch)
    print("Number of badcv images: ", badch_v.size())
    gapch_v = badchmaker.findMissingBadChs(adc_v, badch_v, 10.0, 100)
    for p in xrange(badch_v.size()):
        for c in xrange(badch_v[p].meta().cols()):
            if (gapch_v[p].pixel(0, c) > 0):
                badch_v[p].paint_col(c, 255)
    dt_badch = time.time() - t_badch
Example #14
        for p in range(0,NPLANES):
            nets[p].forward()
        timer["forward"] += time.time()-t_forward

        # retrieve the data
        t_out = time.time()
        for p in range(0,NPLANES):
            output_np[p][0,:,:,:] = nets[p].blobs['softmax'].data[:]
        timer["copyout"]  = time.time()-t_out

        # write to disk
        t_disk = time.time()
        event_ssnet_containers = [ out.get_data( larcv.kProductImage2D, "ssnet_plane%d"%(p) ) for p in range(NPLANES) ]
        for p in range(NPLANES):
            for c in range(NCLASSES):
                event_ssnet_containers[p].Append( larcv.as_image2d_meta( output_np[p][0,c,:], img_v[p].meta() ) )
        out.set_id( event_image_container.run(), event_image_container.subrun(), event_image_container.event() )
        out.save_entry()
        timer["writeout"] += time.time()-t_disk
    
        timer["totentry"] += time.time()-t_entry
        
    print "End of entry loop"
    print "Finalize output"
    out.finalize()
    timer["total"] = time.time()-timer["total"]

    print "Timing for different steps"
    print "--------------------------"
    for k,i in timer.items():
        print k,": ",i," secs (%.2f sec/event)"%(i/float(nprocess_events))
Example #15
        # get predictions from gpu
        flow_np = pred_labels.detach().cpu().numpy().astype(np.float32)

        for ib in range(batch_size):
            if ientry >= nevts:
                # skip last portion of last batch
                break
            evtinfo = data["_rse_"][ib, :]
            meta_v = inputdata.getmeta(treename)
            ev_out = outputdata.get_data(larcv.kProductImage2D,
                                         "uburn_plane%d" % (plane))
            nclasses = flow_np.shape[1]
            for c in range(nclasses):
                flow_lcv = larcv.as_image2d_meta(flow_np[ib, c, :, :],
                                                 meta_v[plane])
                ev_out.Append(flow_lcv)
            outputdata.set_id(evtinfo[0], evtinfo[1], evtinfo[2])
            outputdata.save_entry()
            ientry += 1

        tsave = time.time() - tsave
        timing["++save_output"] += tsave

        # end of batch
        tbatch = time.time() - tbatch
        if verbose:
            print "time for batch: ", tbatch, "secs"
        timing["+batch"] += tbatch

    # save results
Example #16
        meta = larcv.ImageMeta(srcmeta[b, 2, 0, 0], srcmeta[b, 2, 0, 1],
                               srcmeta[b, 2, 0, 2], srcmeta[b, 2, 0, 3],
                               IMAGE_HEIGHT, IMAGE_WIDTH, 2)
        srcmetas.append(meta)
        meta = larcv.ImageMeta(srcmeta[b, 0, 0, 0], srcmeta[b, 0, 0, 1],
                               srcmeta[b, 0, 0, 2], srcmeta[b, 0, 0, 3],
                               IMAGE_HEIGHT, IMAGE_WIDTH, 0)
        targetumetas.append(meta)
        meta = larcv.ImageMeta(srcmeta[b, 1, 0, 0], srcmeta[b, 1, 0, 1],
                               srcmeta[b, 1, 0, 2], srcmeta[b, 1, 0, 3],
                               IMAGE_HEIGHT, IMAGE_WIDTH, 1)
        targetvmetas.append(meta)

    # check image2d input
    img_adc = larcv.as_image2d_meta(
        source[0, 0, :, :].detach().cpu().numpy().transpose((1, 0)),
        srcmetas[0])
    hadc = larcv.as_th2d(img_adc, "hadc_input")
    c.cd(1)
    hadc.SetTitle("source y")
    hadc.Draw("COLZ")
    c.Update()
    #c.SaveAs("hadc_input_%d.png"%(ientry))

    #cv.imwrite( "cvadc_input_%d.png"%(ientry), source[0,0,:,:].detach().cpu().numpy() )

    img_targetu = larcv.as_image2d_meta(
        target1[0, 0, :, :].detach().cpu().numpy().transpose((1, 0)),
        targetumetas[0])
    htargetu = larcv.as_th2d(img_targetu, "htargetu_input")
    c.cd(2)
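
A detail that recurs throughout these examples: the torch/numpy tensors are indexed (row, col) while the larcv converters are fed the transposed layout, hence the .transpose((1, 0)) wrappers around as_image2d_meta and as_ndarray. A round-trip sketch under that assumption:

# round-trip sketch of the transpose convention (inferred from the
# examples above, not a documented contract)
np_img = np.transpose(larcv.as_ndarray(img2d), (1, 0))  # larcv -> (row, col)
img2d_back = larcv.as_image2d_meta(np.transpose(np_img, (1, 0)),
                                   img2d.meta())        # back to larcv
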
Example #17
    def make_reply(self, request, nreplies):
        """
        We load each image and pass it through the net.
        We process all images before sending the complete reply.
        The attribute self._still_processing_msg tells us whether we
        are still in the middle of a reply.
        """
        self._log.debug("make_reply:: received message with {} parts".format(len(request)))

        if not self.is_model_loaded():
            self._log.debug("model not loaded for some reason. loading.")

        try:
            import torch
        except:
            raise RuntimeError("could not load pytorch!")

        try:
            from ROOT import std
        except:
            raise RuntimeError("could not load ROOT.std")

        # message pattern: [image_bson,image_bson,...]

        nmsgs = len(request)
        nbatches = nmsgs/self.batch_size

        if not self._still_processing_msg:
            self._next_msg_id = 0

        # turn message pieces into numpy arrays
        imgset_v = {}
        img2dset_v = {}
        rseid_v  = {}
        sizes    = [] # tuples with (rows,cols,nfeatures,npoints)
        frames_used = []
        totalpts = 0
        for imsg in xrange(self._next_msg_id,nmsgs):
            try:
                compressed_data = str(request[imsg])
                data = zlib.decompress(compressed_data)
                c_run = c_int()
                c_subrun = c_int()
                c_event = c_int()
                c_id = c_int()
                imgdata = larcv.json.sparseimg_from_bson_pybytes(data,
                                        c_run, c_subrun, c_event, c_id )
            except Exception as e:
                self._log.error("Image Data in message part {}".format(imsg)
                                +" could not be converted: {}".format(e))
                continue
            self._log.debug("Image[{}] converted: nfeatures={} npts={}"\
                            .format(imsg,imgdata.nfeatures(),
                                    imgdata.pixellist().size()/(imgdata.nfeatures()+2)))
            #self._log.debug("Image[{}] meta: {}".format(imsg,imgdata.meta_v().front().dump()))

            # convert back to image2d
            imgid = c_id.value
            if imgid not in imgset_v:
                imgset_v[imgid] = []
                img2dset_v[imgid] = []
                rseid_v[imgid]=(c_run.value,c_subrun.value,c_event.value,imgid)
            img2d_v = imgdata.as_Image2D()
            #print(img2d_v.front().meta().dump())
            imgset_v[imgid].append( img2d_v.front() )
            img2dset_v[imgid].append(img2d_v)

        # run the network and produce replies

        # we run in pairs of (src,target) crops. responsibility of Client
        # to get this correct
        keys = imgset_v.keys()
        keys.sort()
        nsets = len(keys)
        current_cols = 0
        current_rows = 0
        src_np = None
        tar_np = None
        ibatch = 0
        iset = 0
        flow_v = {}
        set_v = []
        meta_v = {}

        while iset<nsets:
            setid = keys[iset]
            if len(imgset_v[setid])!=2:
                # set is not complete
                iset += 1
                continue
            if imgset_v[setid][0].meta().plane()==2:
                src = imgset_v[setid][0]
                tar = imgset_v[setid][1]
            else:
                src = imgset_v[setid][1]
                tar = imgset_v[setid][0]
                imgset_v[setid] = []
                imgset_v[setid].append(src)
                imgset_v[setid].append(tar)

            # if first set of images, create numpy array
            if src_np is None:
                imgnptype = np.float32
                if self._use_half:
                    imgnptype = np.float16
                #print("src_np: {}".format((self.batch_size,1,src.meta().rows(),src.meta().cols())))
                src_np = np.zeros( (self.batch_size,1,src.meta().rows(),src.meta().cols()), dtype=imgnptype )
                tar_np = np.zeros( (self.batch_size,1,tar.meta().rows(),tar.meta().cols()), dtype=imgnptype )
                set_v = []
                meta_v = {}

            # check that same size as previous images
            samesize = True
            if src_np.shape[2]!=src.meta().rows() or src_np.shape[3]!=src.meta().cols():
                samesize = False

            # if same size and we have not filled the batch yet, add to batch array
            if samesize and ibatch<self.batch_size:
                src_np[ibatch,0,:,:] = np.transpose( larcv.as_ndarray(src), (1,0) )
                tar_np[ibatch,0,:,:] = np.transpose( larcv.as_ndarray(tar), (1,0) )
                meta_v[setid] = src.meta()
                set_v.append(setid)
                iset += 1
                ibatch += 1

            if not samesize or ibatch==self.batch_size or iset==nsets:
                # convert to torch and run the batch through the network
                src_t  = torch.from_numpy(src_np).to(self.device)
                tar_t  = torch.from_numpy(tar_np).to(self.device)
                with torch.set_grad_enabled(False):
                    flow_t, visi_t = self.model( src_t, tar_t )

                # suppress flow_t where the source pixel is below threshold
                flow_t[ torch.lt(src_t,10.0) ] = 0.0

                # convert back to image2d. only use those with setid
                flow_np = flow_t.detach().cpu().numpy().astype(np.float32)
                for ib,sid in enumerate(set_v):
                    # convert back to image2d
                    flow_v[sid] = larcv.as_image2d_meta( np.transpose(flow_np[ib,0,:,:], (1,0)), meta_v[sid] )

                # reset batch variables
                set_v = []
                ibatch = 0
                src_np = None
                tar_np = None

        # turn image2d into sparseimage and ship back to client
        reply = []
        isfinal = True
        nfilled = 0

        for setid in keys:
            if setid not in flow_v:
                # set was incomplete, so no flow was computed for it
                continue
            flow = flow_v[setid]
            flowpix = larcv.as_pixelarray_with_selection( flow,
                                                          imgset_v[setid][0],
                                                          10.0 )
            # make the sparseimage object
            outmeta_v = std.vector("larcv::ImageMeta")()
            outmeta_v.push_back( imgset_v[setid][0].meta() )
            sparseimg = larcv.sparseimg_from_ndarray( flowpix,
                                                      outmeta_v,
                                                      larcv.msg.kNORMAL )

            # convert to bson string
            rseid = rseid_v[setid]
            bson = larcv.json.as_bson_pybytes( sparseimg,
                                        rseid[0], rseid[1], rseid[2], rseid[3] )
            # compress
            compressed = zlib.compress(bson)

            # add to reply message list
            reply.append(compressed)


        self._log.debug("formed reply with {} frames. isfinal={}"
                        .format(len(reply),isfinal))
        return reply,isfinal
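
The matching client-side decode for the reply frames built above would mirror the worker's own input parsing (zlib plus larcv.json.sparseimg_from_bson_pybytes with ctypes ints for the id fields). A sketch:

from ctypes import c_int
import zlib

def decode_reply_frame(frame):
    # inverse of zlib.compress(larcv.json.as_bson_pybytes(...)) above
    bson = zlib.decompress(frame)
    run, subrun, event, sid = c_int(), c_int(), c_int(), c_int()
    sparseimg = larcv.json.sparseimg_from_bson_pybytes(bson, run, subrun,
                                                       event, sid)
    return sparseimg, (run.value, subrun.value, event.value, sid.value)
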
Example #18
def main(MODEL=MODEL):

    global best_prec1
    # training parameters
    lr = 2.0e-5
    momentum = 0.9
    weight_decay = 1.0e-3
    batchsize_valid = 2

    # Create model -- instantiate on the GPU
    if MODEL == 1:
        if GPUMODE:
            model = ASPP_ResNet(inplanes=16,
                                in_channels=1,
                                num_classes=3,
                                showsizes=False)
        else:
            model = ASPP_ResNet(inplanes=16, in_channels=1, num_classes=3)
    elif MODEL == 2:
        if GPUMODE:
            model = UResNet(inplanes=20,
                            input_channels=1,
                            num_classes=3,
                            showsizes=False)
        else:
            model = UResNet(inplanes=20, input_channels=1, num_classes=3)

    optimizer = torch.optim.SGD(model.parameters(),
                                lr,
                                momentum=momentum,
                                weight_decay=weight_decay)
    cudnn.benchmark = True

    # Load checkpoint and state dictionary
    # Map the checkpoint file to the CPU -- removes GPU mapping
    map_location = {"cuda:0": "cpu", "cuda:1": "cpu"}
    checkpoint = torch.load(CHECKPOINT_FILE, map_location=map_location)

    # Debugging block:
    # print "Checkpoint file mapped to CPU."
    # print "Press return to load best prediction tensor."
    # raw_input()

    best_prec1 = checkpoint["best_prec1"]
    print "state_dict size: ", len(checkpoint["state_dict"])
    print " "
    for p, t in checkpoint["state_dict"].items():
        print p, t.size()

    # Debugging block:
    # print " "
    # print "Best prediction tensor loaded."
    # print "Press return to load state dictionary."
    # raw_input()

    # Map checkpoint file to the desired GPU
    model.load_state_dict(checkpoint["state_dict"])
    model = model.cuda(GPUID)

    print " "
    print "State dictionary mapped to GPU: ", GPUID
    if MODEL == 1:
        modelString = "ASPP_ResNet"
    elif MODEL == 2:
        modelString = "caffe_uresnet"
    print "Press return to deploy:", modelString
    print "From checkpoint:", CHECKPOINT_FILE
    raw_input()

    # switch to evaluate mode
    model.eval()

    # LOAD THE DATASET
    validcfg = """ThreadDatumFillerValid: {

  Verbosity:    2
  EnableFilter: false
  RandomAccess: true
  UseThread:    false
  #InputFiles:   ["/mnt/raid0/taritree/ssnet_training_data/train02.root"]
  InputFiles:   ["/media/hdd1/larbys/ssnet_dllee_trainingdata/val.root"]
  ProcessType:  ["SegFiller"]
  ProcessName:  ["SegFiller"]

  IOManager: {
    Verbosity: 2
    IOMode: 0
    ReadOnlyTypes: [0,0,0]
    ReadOnlyNames: ["wire","segment","ts_keyspweight"]
  }

  ProcessList: {
    SegFiller: {
      # DatumFillerBase configuration
      Verbosity: 2
      ImageProducer:     "wire"
      LabelProducer:     "segment"
      WeightProducer:    "ts_keyspweight"
      # SegFiller configuration
      Channels: [2]
      SegChannel: 2
      EnableMirror: false
      EnableCrop: false
      ClassTypeList: [0,1,2]
      ClassTypeDef: [0,0,0,2,2,2,1,1,1,1]
    }
  }
}
"""
    with open("segfiller_valid.cfg", 'w') as fvalid:
        print >> fvalid, validcfg

    iovalid = LArCV1Dataset("ThreadDatumFillerValid", "segfiller_valid.cfg")
    iovalid.init()
    iovalid.getbatch(batchsize_valid)

    NENTRIES = iovalid.io.get_n_entries()
    NENTRIES = 10  #debug
    print "Number of entries in input: ", NENTRIES

    ientry = 0
    nbatches = NENTRIES / batchsize_valid
    if NENTRIES % batchsize_valid != 0:
        nbatches += 1
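    # equivalently, ceil division: nbatches = (NENTRIES + batchsize_valid - 1) // batchsize_valid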

    for ibatch in range(nbatches):
        data = iovalid.getbatch(batchsize_valid)

        # convert to pytorch Variable (with automatic gradient calc.)
        if GPUMODE:
            images_var = torch.autograd.Variable(data.images.cuda(GPUID))
            labels_var = torch.autograd.Variable(data.labels.cuda(GPUID),
                                                 requires_grad=False)
            weight_var = torch.autograd.Variable(data.weight.cuda(GPUID),
                                                 requires_grad=False)
        else:
            images_var = torch.autograd.Variable(data.images)
            labels_var = torch.autograd.Variable(data.labels,
                                                 requires_grad=False)
            weight_var = torch.autograd.Variable(data.weight,
                                                 requires_grad=False)

        # compute output
        output = model(images_var)
        ev_out_wire = outputdata.get_data(larcv.kProductImage2D, "wire")
        wire_t = images_var.data.cpu().numpy()
        weight_t = weight_var.data.cpu().numpy()
        # get predictions from gpu (convert the validation output back into images)
        labels_np = output.data.cpu().numpy().astype(np.float32)
        labels_np = 10**labels_np

        for ib in range(batchsize_valid):
            if ientry >= NENTRIES:
                break
            inputmeta.read_entry(ientry)

            ev_meta = inputmeta.get_data(larcv.kProductImage2D, "wire")
            outmeta = ev_meta.Image2DArray()[2].meta()

            img_slice0 = labels_np[ib, 0, :, :]
            nofill_lcv = larcv.as_image2d_meta(img_slice0, outmeta)
            ev_out = outputdata.get_data(larcv.kProductImage2D, "class0")
            ev_out.Append(nofill_lcv)

            img_slice1 = labels_np[ib, 1, :, :]
            fill_lcv = larcv.as_image2d_meta(img_slice1, outmeta)
            ev_out = outputdata.get_data(larcv.kProductImage2D, "class1")
            ev_out.Append(fill_lcv)

            wire_slice = wire_t[ib, 0, :, :]
            wire_out = larcv.as_image2d_meta(wire_slice, outmeta)
            ev_out_wire.Append(wire_out)

            #weight_slice=weight_t[ib,0,:,:]
            #weights_out = larcv.as_image2d_meta(weight_slice,outmeta)
            #ev_out_weights.Append( weights_out )

            outputdata.set_id(1, 1, ibatch * batchsize_valid + ib)
            outputdata.save_entry()
            ientry += 1

    # save results
    outputdata.finalize()
Example #19
    def make_reply(self, request, nreplies):
        """we load each image and pass it through the net.
        we run one batch before sending off partial reply.
        the attribute self._still_processing_msg is used to tell us if we
        are still in the middle of a reply.
        """
        #print("DummyPyWorker. Sending client message back")
        self._log.debug("received message with {} parts".format(len(request)))

        if not self.is_model_loaded():
            self._log.debug("model not loaded for some reason. loading.")

        try:
            import torch
        except:
            raise RuntimeError("could not load pytorch!")

        # message pattern: [image_bson,image_bson,...]

        nmsgs = len(request)
        nbatches = nmsgs / self.batch_size

        if not self._still_processing_msg:
            self._next_msg_id = 0

        # turn message pieces into numpy arrays
        img2d_v = []
        sizes = []
        frames_used = []
        rseid_v = []
        for imsg in xrange(self._next_msg_id, nmsgs):
            try:
                compressed_data = str(request[imsg])
                data = zlib.decompress(compressed_data)
                c_run = c_int()
                c_subrun = c_int()
                c_event = c_int()
                c_id = c_int()
                img2d = larcv.json.image2d_from_pystring(
                    data, c_run, c_subrun, c_event, c_id)
            except Exception as e:
                self._log.error("Image Data in message part {} "
                                "could not be converted: {}".format(imsg, e))
                continue
            self._log.debug("Image[{}] converted: {}"\
                            .format(imsg,img2d.meta().dump()))

            # check if correct plane!
            if img2d.meta().plane() != self.plane:
                self._log.debug("Image[{}] is the wrong plane!".format(imsg))
                continue

            # check that same size as previous images
            imgsize = (int(img2d.meta().cols()), int(img2d.meta().rows()))
            if len(sizes) == 0:
                sizes.append(imgsize)
            elif imgsize not in sizes:
                self._log.debug("Next image a different size. "
                                "we do not continue batch.")
                self._next_msg_id = imsg
                break
            img2d_v.append(img2d)
            frames_used.append(imsg)
            rseid_v.append(
                (c_run.value, c_subrun.value, c_event.value, c_id.value))
            if len(img2d_v) >= self.batch_size:
                self._next_msg_id = imsg + 1
                break

        # convert the images into numpy arrays
        nimgs = len(img2d_v)
        self._log.debug(
            "converted msgs into batch of {} images. frames={}".format(
                nimgs, frames_used))
        np_dtype = np.float32
        img_batch_np = np.zeros((nimgs, 1, sizes[0][1], sizes[0][0]),
                                dtype=np_dtype)

        for iimg, img2d in enumerate(img2d_v):
            meta = img2d.meta()
            img2d_np = larcv.as_ndarray( img2d )\
                            .reshape( (1,1,meta.cols(),meta.rows()))

            img2d_np = np.transpose(img2d_np, (0, 1, 3, 2))
            img_batch_np[iimg, :] = img2d_np

            # print("shape of image: ",img2d_np.shape)

        # now make into torch tensor
        img2d_batch_t = torch.from_numpy(img_batch_np).to(self.device)
        # out_batch_np = img2d_batch_t.detach().cpu().numpy()
        # out_batch_np=np.transpose(out_batch_np,(0,1,3,2))

        print("shape of image: ", img2d_batch_t.shape)
        with torch.set_grad_enabled(False):
            out_batch_np = self.model.forward(
                img2d_batch_t).detach().cpu().numpy()
            out_batch_np = np.transpose(out_batch_np, (0, 1, 3, 2))

        # compression techniques
        ## 1) threshold values to zero
        ## 2) suppress output for non-adc values
        ## 3) use half

        # suppress small values
        out_batch_np[out_batch_np < 1.0e-3] = 0.0

        # threshold
        # for ich in xrange(out_batch_np.shape[1]):
        #     out_batch_np[:,ich,:,:][ img_batch_np[:,0,:,:]<10.0 ] = 0.0

        # convert back to full precision, if we used half-precision in the net

        self._log.debug(
            "passed images through net. output batch shape={}".format(
                out_batch_np.shape))
        # convert from numpy array batch back to image2d and messages
        reply = []
        for iimg in xrange(out_batch_np.shape[0]):
            img2d = img2d_v[iimg]
            rseid = rseid_v[iimg]
            meta = img2d.meta()

            out_np = out_batch_np[iimg, 0, :, :]
            # print("out_np",type(out_np))
            # print("meta",type(meta))
            out_img2d = larcv.as_image2d_meta(out_np, meta)
            bson = larcv.json.as_pystring(out_img2d, rseid[0], rseid[1],
                                          rseid[2], rseid[3])
            compressed = zlib.compress(bson)
            reply.append(compressed)

        if self._next_msg_id >= nmsgs:
            isfinal = True
            self._still_processing_msg = False
        else:
            isfinal = False
            self._still_processing_msg = True

        self._log.debug("formed reply with {} frames. isfinal={}".format(
            len(reply), isfinal))
        return reply, isfinal
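
The _next_msg_id / isfinal bookkeeping above implies a driver that keeps calling make_reply until the final partial reply. A hypothetical sketch (the base class that actually drives this is not shown in these examples):

# hypothetical driver for the partial-reply protocol
frames = []
isfinal = False
while not isfinal:
    reply, isfinal = worker.make_reply(request, 1)
    frames.extend(reply)
# 'frames' now holds one compressed image2d bson per processed message part
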
Example #20
    def generate_reply(self):
        """
        we run the network
        """

        reply = []
        totmsgsize = 0.0
        totcompsize = 0.0
        for key, shape in self.shape_dict.items():

            name = key[0]
            planeid = key[1]

            dummy = np.zeros(shape, dtype=np.float32)
            meta = self.meta_dict[key]
            img = self.image_dict[key]
            rse = self.rse_dict[key]

            # rse = "(0,0,0)" # for debug: purposely send the wrong rse back

            msg_batchsize = shape[0]

            # prepare numpy array for output
            # note, we throw away the background scores to save egress data
            # we save results in half-precision; since the numbers are in [0,1],
            # precision is still good to 2^-11 at worst
            if self.reply_in_float16:
                ssnetout = np.zeros(
                    (shape[0], self.NCLASSES - 1, shape[2], shape[3]),
                    dtype=np.float16)
            else:
                ssnetout = np.zeros(
                    (shape[0], self.NCLASSES - 1, shape[2], shape[3]),
                    dtype=np.float32)

            blobshape = (self.BATCHSIZE, 1, shape[2], shape[3])

            # run the net for the plane
            self.nets[planeid].blobs['data'].reshape(*blobshape)

            # process the images in chunks of BATCHSIZE
            for ibatch in range(0, msg_batchsize, self.BATCHSIZE):
                start = ibatch
                end = min(ibatch + self.BATCHSIZE, msg_batchsize)
                nslice = end - start
                imgslice = img[start:end, :]
                self.nets[planeid].blobs['data'].data[0:nslice, :] = imgslice
                self.nets[planeid].forward()
                # drop the background channel (0); keep the remaining scores
                if self.reply_in_float16:
                    ssnetout[start:end, :] = self.nets[planeid].blobs[
                        'softmax'].data[0:nslice, 1:, :].astype(np.float16)
                else:
                    ssnetout[start:end, :] = self.nets[planeid].blobs[
                        'softmax'].data[0:nslice, 1:, :]

                # we threshold score images so compression performs better
                outslice = ssnetout[start:end, :]
                for c in range(outslice.shape[1]):
                    outslice[:, c, :, :][imgslice[:, 0, :, :] < 10.0] = 0.0

            # encode
            if self.decoder == "msgpack":
                x_enc = msgpack.packb(ssnetout, default=m.encode)
                x_comp = zlib.compress(x_enc, self.compression_level)

                # for debug: inspect compression gains (usually reduction to 1% or lower of original size)
                if self.print_msg_size:
                    encframe = zmq.Frame(x_enc)
                    comframe = zmq.Frame(x_comp)
                    totmsgsize += len(encframe.bytes)
                    totcompsize += len(comframe.bytes)

                reply.append(name.encode('utf-8'))
                reply.append(meta.dump().strip() + ":" + rse)
                reply.append(x_comp)

                # make the return message
                print "CaffeLArCV1Worker[{}] preparing (msgpack) reply for name=\"{}\" shape={} meta={} rse={}".format(
                    self._identity, name, ssnetout.shape,
                    meta.dump().strip(), rse)
            elif self.decoder == "tmessage":
                # we have to serialize both channels
                # make track image
                imgtrack = larcv.as_image2d_meta(ssnetout[0, 0, :, :], meta)
                imgshower = larcv.as_image2d_meta(ssnetout[0, 1, :, :], meta)

                tmsgtrack = rt.TMessage()
                tmsgtrack.WriteObject(imgtrack)
                msgtrack = tmsgtrack.Buffer()

                tmsgshower = rt.TMessage()
                tmsgshower.WriteObject(imgshower)
                msgshower = tmsgshower.Buffer()

                reply.append((name + "_track").encode('utf-8'))
                reply.append(meta.dump().strip())
                reply.append(msgtrack)

                reply.append((name + "_shower").encode('utf-8'))
                reply.append(meta.dump().strip())
                reply.append(msgshower)

        if self.print_msg_size:
            print "CaffeLArCV1Worker[{}] finished reply for name=\"{}\". size of array portion={} MB (uncompressed {} MB)".format(
                self._identity, name, totcompsize / 1.0e6, totmsgsize / 1.0e6)

        return reply
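
For reference, the matching client-side decode of one msgpack reply triplet built above; a sketch mirroring the unpack pattern of Examples #1 and #7:

import zlib
import msgpack
import msgpack_numpy as m

def decode_ssnet_reply(reply):
    # frames come in (name, meta:rse, compressed-array) triplets
    out = {}
    for i in range(0, len(reply), 3):
        name = reply[i].decode('utf-8')
        metamsg = reply[i + 1]           # "meta:rse" string frame
        arr = msgpack.unpackb(zlib.decompress(reply[i + 2]),
                              object_hook=m.decode)
        out[name] = (metamsg, arr)
    return out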