def process_message(self, frames):
        """ we expect a batch for each plane 
        """

        # remake arrays
        self.msg_dict = {}
        parts = len(frames)
        for i in range(0, parts, 3):
            # parse frames
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_comp = frames[i + 2]
            #x_enc  = zlib.decompress(x_comp)

            # decode frames

            # -- meta
            meta = decode_larcv1_metamsg(metamsg)

            key = (name, meta.plane())
            self.msg_dict[key] = [frames[i], metamsg, x_comp]

            print "CaffeLArCV1ThreadedWorker[{}] received array name=\"{}\"  meta={}".format(
                self._identity, name,
                meta.dump().strip())

        return "Thanks!"
示例#2
0
    def process_message(self, frames):

        # remake arrays
        self.shape_dict = None
        self.meta_dict = None
        parts = len(frames)
        for i in range(0, parts, 3):
            # parse frames
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_enc = frames[i + 2]

            # decode frames

            # -- meta
            meta = decode_larcv1_metamsg(metamsg)

            # -- array
            arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            if self.shape_dict is None:
                self.shape_dict = {}
                self.shape_dict["dummy"] = arr.shape
            if self.meta_dict is None:
                self.meta_dict = {}
                self.meta_dict["dummy"] = meta

            print "DummyLArCVWorker[{}] received array name=\"{}\" shape={} meta={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip())

        return "Thanks!"
示例#3
0
    def process_reply(self, frames):
        parts = len(frames)

        # for each batch of images, we save an entry ...
        # how to handle multiple batches? will need a batch to eventid map
        # when eventid changes, then save

        for i in range(0, parts, 3):
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_enc = frames[i + 2]

            meta = decode_larcv1_metamsg(metamsg)
            arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            print "SimpleLArCV1Client[{}] received array name=\"{}\" shape={} meta={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip())
            nbatches = arr.shape[0]

            for ib in range(nbatches):

                rse = self.batch2rse[ib]
                if self.current_rse is not None and rse != self.current_rse:
                    # save output entry
                    self.io_out.set_id(rse[0], rse[1], rse[2])
                    print "SimpleLArCV1Client[{}] saving entry {}".format(
                        self._identity, rse)
                    self.io_out.save_entry()
                    self.io_out.clear_entry()

                self.current_rse = rse

                output_ev_container = self.io_out.get_data(
                    larcv.kProductImage2D, str(name))
                for i in range(arr.shape[1]):
                    meta = decode_larcv1_metamsg(metamsg, i)
                    img = larcv.as_image2d_meta(
                        np.transpose(arr[ib, i, :], (1, 0)), meta)
                    output_ev_container.Append(img)

        # save output entry
        self.io_out.set_id(rse[0], rse[1], rse[2])
        self.io_out.save_entry()
        self.io_out.clear_entry()
        self.current_rse = rse
示例#4
0
    def process_message(self, frames):
        """ we expect a batch for each plane 
        """

        # remake arrays
        self.shape_dict = {}
        self.meta_dict = {}
        self.rse_dict = {}
        self.image_dict = {}
        parts = len(frames)
        for i in range(0, parts, 3):
            # parse frames
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_comp = frames[i + 2]

            # -- decode meta
            print "meta msg: ", metamsg
            meta = decode_larcv1_metamsg(metamsg)
            rse = metamsg.split(":")[-1]

            # -- array
            if self.decoder == "msgpack":
                x_enc = zlib.decompress(x_comp)
                arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            elif self.decoder == "tmessage":
                print type(x_comp), len(x_comp), x_comp
                tmsg = larcv.Image2DTMessage(x_comp, len(x_comp))
                img = tmsg.decode()
                arr = larcv.as_ndarray(img)
            else:
                raise ValueError("Unrecognized decoder: {}".format(
                    self.decoder))

            key = (name, meta.plane())
            if key not in self.image_dict:
                self.image_dict[key] = {}
                self.meta_dict[key] = {}

            self.image_dict[key] = arr
            self.meta_dict[key] = meta
            self.shape_dict[key] = arr.shape
            self.rse_dict[key] = rse

            print "CaffeLArCV1Worker[{}] received array name=\"{}\" shape={} meta={} rse={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip(), rse)

        return "Thanks!"
示例#5
0
    def process_croi_reply(self, frames):

        # message parts consist of ssnet output
        # one part contains a batch for one plane
        # we must collect data for all three planes for an event, before writing it to disk

        # by construction, one batch is for one event
        # one message contains one batch
        # this makes it a lot easier to understand
        # someone smarter can make general code

        # first, set the rse values for the entry
        rse = self.batch2rse[0]
        for pim in self.py_image_makers:
            pim.set_id(rse[0], rse[1], rse[2])

        if frames is None:
            # save an empty event
            self.out_proc.process_entry()
            print "CaffeLArCV1Client[{}] saved an empty entry"
            return

        treply = time.time()

        parts = len(frames)
        for i in range(0, parts, 3):
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            rsemsg = metamsg.split(":")[-1]
            metamsg = metamsg.split(":")[0]
            x_comp = frames[i + 2]
            x_enc = zlib.decompress(x_comp)
            meta = decode_larcv1_metamsg(metamsg)
            arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            nbatches = arr.shape[0]
            print "CaffeLArCV1Client[{}] received array name=\"{}\" shape={} meta={} rse={} batchsize={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip(), rsemsg, nbatches)

            rse_returned = eval(rsemsg)

            if nbatches != 1:
                print "CaffeLArCV1Client[{}] unexpected batchsize!".format(
                    self._identity)
                return False
            if rse_returned != rse:
                print "CaffeLArCV1Client[{}] mismatched RSE! Sent={} vs. Received={}".format(
                    self._identity, rse, rse_returned)
                return False

            planeid = meta.plane()
            # note, the background channel is not sent back
            for out_ch in [0, 1]:
                self.py_image_makers[planeid].append_ndarray_meta(
                    arr[0, out_ch, :], meta, out_ch)

        self.out_proc.process_entry()

        # make output event containers
        print "CaffeLArCV1Client[{}] pyimagestitcher processed message.".format(
            self._identity)

        treply = time.time() - treply
        self._ttracker["savereply::total"] += treply
        return True
示例#6
0
    def process_precropped_reply(self, frames):

        # message parts consist of ssnet output
        # one part contains a batch for one plane
        # we must collect data for all three planes for an event, before writing it to disk

        # by construction, one batch is for one event
        # one message contains one batch
        # this makes it a lot easier to understand
        # someone smarter can make general code

        treply = time.time()

        plane_img_v_dict = {}

        parts = len(frames)
        for i in range(0, parts, 3):
            name = frames[i].decode("ascii")
            metamsg = frames[i + 1]
            x_comp = frames[i + 2]
            x_enc = zlib.decompress(x_comp)

            if name not in plane_img_v_dict:
                plane_img_v_dict[name] = [
                    std.vector("larcv::Image2D")() for x in range(self.NPLANES)
                ]

            meta = decode_larcv1_metamsg(metamsg)
            arr = msgpack.unpackb(x_enc, object_hook=m.decode)
            nbatches = arr.shape[0]
            print "CaffeLArCV1Client[{}] received array name=\"{}\" shape={} meta={} batchsize={}".format(
                self._identity, name, arr.shape,
                meta.dump().strip(), nbatches)

            for ib in range(nbatches):
                # set the RSE
                rse = self.batch2rse[ib]
                self.current_rse = rse

                #img = larcv.as_image2d_meta( np.transpose( arr[ib,0,:], (1,0) ), meta ) # not needed?
                img = larcv.as_image2d_meta(arr[ib, 0, :], meta)
                print "fill ", name, " meta=", meta.dump().strip()
                plane_img_v_dict[name][meta.plane()].push_back(img)

        # make output event containers
        print "CaffeLArCV1Client[{}] storing images".format(self._identity)
        for name, plane_img_v in plane_img_v_dict.items():
            print "name: ", name, plane_img_v
            for img_v in plane_img_v:
                print img_v
                print img_v.size()
                planeid = img_v.front().meta().plane()
                print "CaffeLArCV1Client[{}] storing name={} plane={}".format(
                    self._identity, name, planeid)
                outname = "%s_plane%d" % (str(name.decode("ascii")), planeid)
                print "Filling event container: ", outname
                output_ev_container = self.io_out.get_data(
                    larcv.kProductImage2D, outname)
                for iimg in range(img_v.size()):
                    output_ev_container.Append(img_v[iimg])

        # save output entry
        self.io_out.set_id(rse[0], rse[1], rse[2])
        self.io_out.save_entry()
        self.io_out.clear_entry()
        self.current_rse = rse

        treply = time.time() - treply
        self._ttracker["savereply::total"] += treply
    def process_image(self, frames):
        """Run the SSNet caffe model over one compressed image message.

        ``frames`` = [name, meta-message, zlib-compressed msgpack array].
        The decoded array -- assumed (batch, 1, H, W), TODO confirm -- is
        pushed through the network in chunks of ``self.BATCHSIZE``.  The
        background class (channel 0) of the softmax output is dropped and
        scores over low-ADC input pixels are zeroed so the reply compresses
        well.

        Returns the reply frame list:
        [topic, utf-8 name, meta dump, compressed score array].
        """

        # parse frames
        name = frames[0]
        metamsg = frames[1]
        x_comp = frames[2]
        x_enc = zlib.decompress(x_comp)

        # -- meta
        meta = decode_larcv1_metamsg(metamsg)

        # -- array
        arr = msgpack.unpackb(x_enc, object_hook=m.decode)
        shape = arr.shape

        msg_batchsize = shape[0]

        # prepare numpy array for output
        # note, we throw away the background scores to save egress data
        # half-precision keeps [0,1] scores good to 2^-11 at worst
        # BUGFIX: the dtype choice was inverted -- float16 was allocated
        # when reply_in_float16 was False and vice versa
        if self.reply_in_float16:
            outdtype = np.float16
        else:
            outdtype = np.float32
        ssnetout = np.zeros(
            (shape[0], self.NCLASSES - 1, shape[2], shape[3]),
            dtype=outdtype)

        blobshape = (self.BATCHSIZE, 1, shape[2], shape[3])

        # run the net for the plane
        self.net.blobs['data'].reshape(*blobshape)

        # process the images in chunks of BATCHSIZE
        # BUGFIX: range() already steps by BATCHSIZE, so the slice bounds
        # must not be multiplied by BATCHSIZE again (the old code skipped
        # data whenever BATCHSIZE > 1)
        for start in range(0, msg_batchsize, self.BATCHSIZE):
            end = min(start + self.BATCHSIZE, msg_batchsize)
            nimgs = end - start
            imgslice = arr[start:end, :]

            if nimgs < self.BATCHSIZE:
                # zero-pad a partial final chunk up to the blob size
                padded = np.zeros(blobshape, dtype=np.float32)
                padded[:nimgs] = imgslice
                self.net.blobs['data'].data[...] = padded
            else:
                self.net.blobs['data'].data[...] = imgslice

            tforward = time.time()
            self.net.forward()
            tforward = time.time() - tforward

            # copy predictions, dropping the background channel (0)
            # BUGFIX: the partial-chunk branch previously copied all
            # NCLASSES channels (missing the 1: slice), a shape mismatch
            scores = self.net.blobs['softmax'].data[0:nimgs, 1:, :]
            if self.reply_in_float16:
                ssnetout[start:end, :] = scores.astype(np.float16)
            else:
                ssnetout[start:end, :] = scores

            # we threshold score images so compression performs better
            # BUGFIX: build the mask with the chunk's own batch size; the
            # old reshape to (1,1,H,W) only worked for one image per chunk
            mask = imgslice[:, 0, :, :] < 5.0
            outslice = ssnetout[start:end, :]
            for c in range(outslice.shape[1]):
                outslice[:, c, :, :][mask] = 0

        # encode
        x_enc = msgpack.packb(ssnetout, default=m.encode)
        x_comp = zlib.compress(x_enc, self._compression_level)

        # make the return message
        reply = ["plane%d" % (self.PLANEID)]  # topic frame
        reply.append(name.encode('utf-8'))
        reply.append(meta.dump().strip())
        reply.append(x_comp)

        return reply