Example #1
    def create_pdf(self, iterable, tbins, trange, qbins, qrange, nreps=1):
        """Returns tuple: 1D array of channel hit counts, 3D array of
        (channel, time, charge) pdfs."""
        first_element, iterable = itertoolset.peek(iterable)

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)

        pdf_config = (tbins, trange, qbins, qrange)
        if pdf_config != self.pdf_config:
            self.pdf_config = pdf_config
            self.gpu_pdf.setup_pdf(self.detector.num_channels(), tbins, trange,
                                   qbins, qrange)
        else:
            self.gpu_pdf.clear_pdf()

        if nreps > 1:
            iterable = itertoolset.repeating_iterator(iterable, nreps)

        for ev in iterable:
            gpu_photons = gpu.GPUPhotons(ev.photons_beg)
            gpu_photons.propagate(self.gpu_geometry, self.rng_states,
                                  nthreads_per_block=self.nthreads_per_block,
                                  max_blocks=self.max_blocks)
            self.gpu_daq.begin_acquire()
            self.gpu_daq.acquire(gpu_photons, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks)
            gpu_channels = self.gpu_daq.end_acquire()
            self.gpu_pdf.add_hits_to_pdf(gpu_channels)
        
        return self.gpu_pdf.get_pdfs()
Example #2
    def create_pdf(self, iterable, tbins, trange, qbins, qrange, nreps=1):
        """Returns tuple: 1D array of channel hit counts, 3D array of
        (channel, time, charge) pdfs."""
        first_element, iterable = itertoolset.peek(iterable)

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)

        pdf_config = (tbins, trange, qbins, qrange)
        if pdf_config != self.pdf_config:
            self.pdf_config = pdf_config
            self.gpu_pdf.setup_pdf(self.detector.num_channels(), tbins, trange,
                                   qbins, qrange)
        else:
            self.gpu_pdf.clear_pdf()

        if nreps > 1:
            iterable = itertoolset.repeating_iterator(iterable, nreps)

        for ev in iterable:
            gpu_photons = gpu.GPUPhotons(ev.photons_beg)
            gpu_photons.propagate(self.gpu_geometry,
                                  self.rng_states,
                                  nthreads_per_block=self.nthreads_per_block,
                                  max_blocks=self.max_blocks)
            self.gpu_daq.begin_acquire()
            self.gpu_daq.acquire(gpu_photons,
                                 self.rng_states,
                                 nthreads_per_block=self.nthreads_per_block,
                                 max_blocks=self.max_blocks)
            gpu_channels = self.gpu_daq.end_acquire()
            self.gpu_pdf.add_hits_to_pdf(gpu_channels)

        return self.gpu_pdf.get_pdfs()
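Both variants above build a binned (channel, time, charge) PDF on the GPU from an iterable of events or photon bundles. A minimal usage sketch follows; sim, events, and all binning values are illustrative assumptions rather than names taken from the source.

    # Hypothetical call: sim is an instance of the class defining create_pdf,
    # events is an iterable of event.Event objects; binning and units are assumed.
    hitcount, pdf = sim.create_pdf(events,
                                   tbins=200, trange=(-50.0, 150.0),  # time axis
                                   qbins=20, qrange=(-0.5, 19.5),     # charge axis
                                   nreps=16)                          # repeat each input event 16 times
    # Per the docstring: hitcount is the 1D per-channel hit-count array and
    # pdf is the 3D (channel, time, charge) histogram.

Note that the GPU accumulator is only reconfigured when the binning changes; otherwise it is cleared and reused, so repeated calls with the same binning avoid re-running setup_pdf.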
Example #3
    def eval_kernel(self, event_channels,
                    kernel_iterable,
                    trange, qrange, 
                    nreps=1, ndaq=1, naverage=1, time_only=True):
        """Returns tuple: 1D array of channel hit counts, 1D array of PDF
        probability densities."""

        self.gpu_pdf_kernel.setup_kernel(event_channels.hit,
                                         event_channels.t,
                                         event_channels.q)
        first_element, kernel_iterable = itertoolset.peek(kernel_iterable)
        if isinstance(first_element, event.Event):
            kernel_iterable = \
                self.photon_generator.generate_events(kernel_iterable)

        # Evaluate likelihood using this bandwidth
        for ev in kernel_iterable:
            gpu_photons = gpu.GPUPhotons(ev.photons_beg, ncopies=nreps)
            gpu_photons.propagate(self.gpu_geometry, self.rng_states,
                                  nthreads_per_block=self.nthreads_per_block,
                                  max_blocks=self.max_blocks)
            for gpu_photon_slice in gpu_photons.iterate_copies():
                for idaq in xrange(ndaq):
                    self.gpu_daq.begin_acquire()
                    self.gpu_daq.acquire(gpu_photon_slice, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks)
                    gpu_channels = self.gpu_daq.end_acquire()
                    self.gpu_pdf_kernel.accumulate_kernel(gpu_channels)
        
        return self.gpu_pdf_kernel.get_kernel_eval()
Example #4
 def setup_kernel(self, event_channels, bandwidth_iterable,
                      trange, qrange, 
                      nreps=1, ndaq=1, time_only=True, scale_factor=1.0):
     '''Call this before calling eval_pdf_kernel().  Sets up the
     event information and computes an appropriate kernel bandwidth'''
     nchannels = len(event_channels.hit)
     self.gpu_pdf_kernel.setup_moments(nchannels, trange, qrange,
                                       time_only=time_only)
     # Compute bandwidth
     first_element, bandwidth_iterable = itertoolset.peek(bandwidth_iterable)
     if isinstance(first_element, event.Event):
         bandwidth_iterable = \
             self.photon_generator.generate_events(bandwidth_iterable)
     for ev in bandwidth_iterable:
         gpu_photons = gpu.GPUPhotons(ev.photons_beg, ncopies=nreps)
         gpu_photons.propagate(self.gpu_geometry, self.rng_states,
                               nthreads_per_block=self.nthreads_per_block,
                               max_blocks=self.max_blocks)
         for gpu_photon_slice in gpu_photons.iterate_copies():
             for idaq in xrange(ndaq):
                 self.gpu_daq.begin_acquire()
                 self.gpu_daq.acquire(gpu_photon_slice, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks)
                 gpu_channels = self.gpu_daq.end_acquire()
                 self.gpu_pdf_kernel.accumulate_moments(gpu_channels)
         
     self.gpu_pdf_kernel.compute_bandwidth(event_channels.hit,
                                           event_channels.t,
                                           event_channels.q,
                                           scale_factor=scale_factor)
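Examples #3 and #4 form the kernel-density counterpart of the binned PDF: setup_kernel propagates a bandwidth sample and derives per-channel kernel bandwidths, after which eval_kernel accumulates the density estimate at the observed hits (its docstring's eval_pdf_kernel() appears to be an older name for eval_kernel()). A usage sketch follows, with every object name and value an assumption; note that, as written, eval_kernel does not actually use its trange, qrange, naverage, or time_only arguments.

    # Hypothetical usage: sim defines both methods, event_channels holds the
    # observed hit/t/q arrays, and the two iterables supply simulated events.
    sim.setup_kernel(event_channels, bandwidth_events,
                     trange=(-50.0, 150.0), qrange=(-0.5, 19.5),
                     nreps=4, ndaq=8, time_only=True, scale_factor=1.0)
    hitcount, densities = sim.eval_kernel(event_channels, kernel_events,
                                          trange=(-50.0, 150.0), qrange=(-0.5, 19.5),
                                          nreps=4, ndaq=8)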
Example #5
    def simulate(self, iterable, keep_photons_beg=False,
                 keep_photons_end=False, run_daq=True, max_steps=100 ):

        try:
            if isinstance(iterable, event.Photons):
                raise TypeError # Kludge because Photons looks iterable
            else:
                first_element, iterable = itertoolset.peek(iterable)
        except TypeError:
            first_element, iterable = iterable, [iterable]


        t_photon_start = time.time()
        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)
        elif isinstance(first_element, event.Photons):
            iterable = (event.Event(photons_beg=x) for x in iterable)
        elif isinstance(first_element, GPUPhotons):
            print "GPU Photons"
            iterable = (event.Event(photons_beg=x) for x in iterable) # hacky!!!
        elif isinstance(first_element, event.Vertex):
            iterable = (event.Event(primary_vertex=vertex, vertices=[vertex]) for vertex in iterable)
            iterable = self.photon_generator.generate_events(iterable)
        t_photon_end = time.time()
        print "Photon Load Time: ",t_photon_end-t_photon_start," sec"

        for ev in iterable:
            photons = ev.photons_beg
            if isinstance(photons,event.Photons):
                gpu_photons = GPUPhotons(photons,cl_context=self.context)
            elif isinstance(photons,GPUPhotons):
                gpu_photons = photons
                ev.photons_beg = photons.get()
            gpu_photons.propagate(self.gpu_geometry, self.rng_states,
                                  nthreads_per_block=self.nthreads_per_block,
                                  max_blocks=self.max_blocks,
                                  max_steps=max_steps, cl_context=self.context)
            ev.nphotons = len(ev.photons_beg.pos)

            if not keep_photons_beg:
                ev.photons_beg = None

            if keep_photons_end:
                ev.photons_end = gpu_photons.get()

            # Skip running DAQ if we don't have one
            if hasattr(self, 'gpu_daq') and run_daq:
                t_daq_start = time.time()
                self.gpu_daq.begin_acquire( cl_context=self.context )
                self.gpu_daq.acquire(gpu_photons, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks, cl_context=self.context )
                gpu_channels = self.gpu_daq.end_acquire( cl_context=self.context )
                ev.channels = gpu_channels.get()
                t_daq_end = time.time()
                print "DAQ readout time: ",t_daq_end-t_daq_start," sec"

            yield ev
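This simulate variant targets a Python 2 / PyOpenCL setup (print statements, an explicit cl_context) and additionally accepts already-uploaded GPUPhotons. It is a generator, so events are propagated lazily as the caller iterates. A usage sketch with sim and photons as assumed names:

    # Hypothetical usage: a single event.Photons object is wrapped into a
    # one-event iterable by the TypeError kludge at the top of simulate.
    for ev in sim.simulate(photons, keep_photons_end=True, run_daq=True):
        final_photons = ev.photons_end   # kept because keep_photons_end=True
        channels = ev.channels           # filled only when a gpu_daq is attached and run_daq=True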
Example #6
    def simulate(self,
                 iterable,
                 keep_photons_beg=False,
                 keep_photons_end=False,
                 keep_hits=True,
                 keep_flat_hits=True,
                 run_daq=False,
                 max_steps=1000,
                 photons_per_batch=1000000):
        if isinstance(iterable, event.Photons):
            first_element, iterable = iterable, [iterable]
        else:
            first_element, iterable = itertoolset.peek(iterable)

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)
        elif isinstance(first_element, event.Photons):
            iterable = (event.Event(photons_beg=x) for x in iterable)
        elif isinstance(first_element, event.Vertex):
            iterable = (event.Event(vertices=[vertex]) for vertex in iterable)
            iterable = self.photon_generator.generate_events(iterable)

        nphotons = 0
        batch_events = []

        for ev in iterable:

            ev.nphotons = len(ev.photons_beg)
            ev.photons_beg.evidx[:] = len(batch_events)

            nphotons += ev.nphotons
            batch_events.append(ev)

            #FIXME need an alternate implementation to split an event that is too large
            if nphotons >= photons_per_batch:
                yield from self._simulate_batch(
                    batch_events,
                    keep_photons_beg=keep_photons_beg,
                    keep_photons_end=keep_photons_end,
                    keep_hits=keep_hits,
                    keep_flat_hits=keep_flat_hits,
                    run_daq=run_daq,
                    max_steps=max_steps)
                nphotons = 0
                batch_events = []

        if len(batch_events) != 0:
            yield from self._simulate_batch(batch_events,
                                            keep_photons_beg=keep_photons_beg,
                                            keep_photons_end=keep_photons_end,
                                            keep_hits=keep_hits,
                                            keep_flat_hits=keep_flat_hits,
                                            run_daq=run_daq,
                                            max_steps=max_steps)
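This batched variant accumulates events until the running photon count reaches photons_per_batch and then hands the whole batch to _simulate_batch (not shown), so small events share GPU launches; as the FIXME notes, a single event larger than the batch size is not split. The batching is invisible to the caller, who still receives processed events one at a time. A sketch with assumed helper names:

    # Hypothetical usage: make_photons and process are placeholder helpers.
    events = (event.Event(photons_beg=make_photons(seed)) for seed in range(1000))
    for ev in sim.simulate(events, keep_hits=True, photons_per_batch=500000):
        process(ev.hits)   # assuming _simulate_batch fills ev.hits when keep_hits=True,
                           # as the non-batched variants below do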
Example #7
    def simulate(self,
                 iterable,
                 keep_photons_beg=False,
                 keep_photons_end=False,
                 keep_hits=True,
                 run_daq=False,
                 max_steps=100):
        if isinstance(iterable, event.Photons):
            first_element, iterable = iterable, [iterable]
        else:
            first_element, iterable = itertoolset.peek(iterable)

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)
        elif isinstance(first_element, event.Photons):
            iterable = (event.Event(photons_beg=x) for x in iterable)
        elif isinstance(first_element, event.Vertex):
            iterable = (event.Event(vertices=[vertex]) for vertex in iterable)
            iterable = self.photon_generator.generate_events(iterable)

        for ev in iterable:
            gpu_photons = gpu.GPUPhotons(ev.photons_beg)

            gpu_photons.propagate(self.gpu_geometry,
                                  self.rng_states,
                                  nthreads_per_block=self.nthreads_per_block,
                                  max_blocks=self.max_blocks,
                                  max_steps=max_steps)

            ev.nphotons = len(ev.photons_beg.pos)

            if not keep_photons_beg:
                ev.photons_beg = None

            if keep_photons_end:
                ev.photons_end = gpu_photons.get()

            if hasattr(self.detector, 'num_channels') and keep_hits:
                ev.hits = gpu_photons.get_hits(self.gpu_geometry)

            # Skip running DAQ if we don't have one
            # Disabled by default because incredibly special-case
            if hasattr(self, 'gpu_daq') and run_daq:
                self.gpu_daq.begin_acquire()
                self.gpu_daq.acquire(
                    gpu_photons,
                    self.rng_states,
                    nthreads_per_block=self.nthreads_per_block,
                    max_blocks=self.max_blocks)
                gpu_channels = self.gpu_daq.end_acquire()
                ev.channels = gpu_channels.get()

            yield ev
Example #8
    def simulate(self, iterable, keep_photons_beg=False,
                 keep_photons_end=False, keep_hits=True, run_daq=False, max_steps=100):
        if isinstance(iterable, event.Photons):
            first_element, iterable = iterable, [iterable]
        else:
            first_element, iterable = itertoolset.peek(iterable)
        

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)
        elif isinstance(first_element, event.Photons):
            iterable = (event.Event(photons_beg=x) for x in iterable)
        elif isinstance(first_element, event.Vertex):
            iterable = (event.Event(vertices=[vertex]) for vertex in iterable)
            iterable = self.photon_generator.generate_events(iterable)

        for ev in iterable:
            gpu_photons = gpu.GPUPhotons(ev.photons_beg)

            gpu_photons.propagate(self.gpu_geometry, self.rng_states,
                                  nthreads_per_block=self.nthreads_per_block,
                                  max_blocks=self.max_blocks,
                                  max_steps=max_steps)

            ev.nphotons = len(ev.photons_beg.pos)

            if not keep_photons_beg:
                ev.photons_beg = None

            if keep_photons_end:
                ev.photons_end = gpu_photons.get()
            
            if hasattr(self.detector, 'num_channels') and keep_hits:
                ev.hits = gpu_photons.get_hits(self.gpu_geometry)
                
            # Skip running DAQ if we don't have one
            # Disabled by default because incredibly special-case
            if hasattr(self, 'gpu_daq') and run_daq:
                self.gpu_daq.begin_acquire()
                self.gpu_daq.acquire(gpu_photons, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks)
                gpu_channels = self.gpu_daq.end_acquire()
                ev.channels = gpu_channels.get()

            yield ev
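The two variants above are the same simulate generator with different line wrapping: photons are propagated, per-channel hits are attached when the geometry is a full detector (it has num_channels) and keep_hits is set, and the DAQ only runs when explicitly requested. A usage sketch with sim and photons as assumed names:

    # Hypothetical usage: photons_beg is dropped and the DAQ skipped by default.
    for ev in sim.simulate(photons, keep_photons_end=True, keep_hits=True, max_steps=200):
        surviving = ev.photons_end   # final photon states
        hits = ev.hits               # detected photons per channel, when the geometry is a detector
        # ev.channels is only filled in when run_daq=True and a gpu_daq exists.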
Example #9
    def eval_pdf(self,
                 event_channels,
                 iterable,
                 min_twidth,
                 trange,
                 min_qwidth,
                 qrange,
                 min_bin_content=100,
                 nreps=1,
                 ndaq=1,
                 nscatter=1,
                 time_only=True):
        """Returns tuple: 1D array of channel hit counts, 1D array of PDF
        probability densities."""
        ndaq_per_rep = 64
        ndaq_reps = ndaq // ndaq_per_rep
        gpu_daq = gpu.GPUDaq(self.gpu_geometry, ndaq=ndaq_per_rep)

        self.gpu_pdf.setup_pdf_eval(event_channels.hit,
                                    event_channels.t,
                                    event_channels.q,
                                    min_twidth,
                                    trange,
                                    min_qwidth,
                                    qrange,
                                    min_bin_content=min_bin_content,
                                    time_only=True)

        first_element, iterable = itertoolset.peek(iterable)

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)
        elif isinstance(first_element, event.Photons):
            iterable = (event.Event(photons_beg=x) for x in iterable)

        for ev in iterable:
            gpu_photons_no_scatter = gpu.GPUPhotons(ev.photons_beg,
                                                    ncopies=nreps)
            gpu_photons_scatter = gpu.GPUPhotons(ev.photons_beg,
                                                 ncopies=nreps * nscatter)
            gpu_photons_no_scatter.propagate(
                self.gpu_geometry,
                self.rng_states,
                nthreads_per_block=self.nthreads_per_block,
                max_blocks=self.max_blocks,
                use_weights=True,
                scatter_first=-1,
                max_steps=10)
            gpu_photons_scatter.propagate(
                self.gpu_geometry,
                self.rng_states,
                nthreads_per_block=self.nthreads_per_block,
                max_blocks=self.max_blocks,
                use_weights=True,
                scatter_first=1,
                max_steps=5)
            nphotons = gpu_photons_no_scatter.true_nphotons  # same for scatter
            for i in range(gpu_photons_no_scatter.ncopies):
                start_photon = i * nphotons
                gpu_photon_no_scatter_slice = gpu_photons_no_scatter.select(
                    event.SURFACE_DETECT,
                    start_photon=start_photon,
                    nphotons=nphotons)
                gpu_photon_scatter_slices = [
                    gpu_photons_scatter.select(
                        event.SURFACE_DETECT,
                        start_photon=(nscatter * i + j) * nphotons,
                        nphotons=nphotons) for j in range(nscatter)
                ]

                if len(gpu_photon_no_scatter_slice) == 0:
                    continue

                #weights = gpu_photon_slice.weights.get()
                #print 'weights', weights.min(), weights.max()
                for j in range(ndaq_reps):
                    gpu_daq.begin_acquire()
                    gpu_daq.acquire(gpu_photon_no_scatter_slice,
                                    self.rng_states,
                                    nthreads_per_block=self.nthreads_per_block,
                                    max_blocks=self.max_blocks)
                    for scatter_slice in gpu_photon_scatter_slices:
                        gpu_daq.acquire(
                            scatter_slice,
                            self.rng_states,
                            nthreads_per_block=self.nthreads_per_block,
                            max_blocks=self.max_blocks,
                            weight=1.0 / nscatter)
                    gpu_channels = gpu_daq.end_acquire()
                    self.gpu_pdf.accumulate_pdf_eval(
                        gpu_channels, nthreads_per_block=ndaq_per_rep)

        return self.gpu_pdf.get_pdf_eval()
Example #10
    def eval_pdf(self, event_channels, iterable, min_twidth, trange, min_qwidth, qrange, min_bin_content=100, nreps=1, ndaq=1, nscatter=1, time_only=True):
        """Returns tuple: 1D array of channel hit counts, 1D array of PDF
        probability densities."""
        ndaq_per_rep = 64
        ndaq_reps = ndaq // ndaq_per_rep
        gpu_daq = gpu.GPUDaq(self.gpu_geometry, ndaq=ndaq_per_rep)

        self.gpu_pdf.setup_pdf_eval(event_channels.hit,
                                    event_channels.t,
                                    event_channels.q,
                                    min_twidth,
                                    trange,
                                    min_qwidth,
                                    qrange,
                                    min_bin_content=min_bin_content,
                                    time_only=True)

        first_element, iterable = itertoolset.peek(iterable)

        if isinstance(first_element, event.Event):
            iterable = self.photon_generator.generate_events(iterable)
        elif isinstance(first_element, event.Photons):
            iterable = (event.Event(photons_beg=x) for x in iterable)

        for ev in iterable:
            gpu_photons_no_scatter = gpu.GPUPhotons(ev.photons_beg, ncopies=nreps)
            gpu_photons_scatter = gpu.GPUPhotons(ev.photons_beg, ncopies=nreps*nscatter)
            gpu_photons_no_scatter.propagate(self.gpu_geometry, self.rng_states,
                                             nthreads_per_block=self.nthreads_per_block,
                                             max_blocks=self.max_blocks,
                                             use_weights=True,
                                             scatter_first=-1,
                                             max_steps=10)
            gpu_photons_scatter.propagate(self.gpu_geometry, self.rng_states,
                                          nthreads_per_block=self.nthreads_per_block,
                                          max_blocks=self.max_blocks,
                                          use_weights=True,
                                          scatter_first=1,
                                          max_steps=5)
            nphotons = gpu_photons_no_scatter.true_nphotons # same for scatter
            for i in xrange(gpu_photons_no_scatter.ncopies):
                start_photon = i * nphotons
                gpu_photon_no_scatter_slice = gpu_photons_no_scatter.select(event.SURFACE_DETECT,
                                                                            start_photon=start_photon,
                                                                            nphotons=nphotons)
                gpu_photon_scatter_slices = [gpu_photons_scatter.select(event.SURFACE_DETECT,
                                                                        start_photon=(nscatter*i+j)*nphotons,
                                                                        nphotons=nphotons)
                                             for j in xrange(nscatter)]
                
                if len(gpu_photon_no_scatter_slice) == 0:
                    continue

                #weights = gpu_photon_slice.weights.get()
                #print 'weights', weights.min(), weights.max()
                for j in xrange(ndaq_reps):
                    gpu_daq.begin_acquire()
                    gpu_daq.acquire(gpu_photon_no_scatter_slice, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks)
                    for scatter_slice in gpu_photon_scatter_slices:
                        gpu_daq.acquire(scatter_slice, self.rng_states, nthreads_per_block=self.nthreads_per_block, max_blocks=self.max_blocks, weight=1.0/nscatter)
                    gpu_channels = gpu_daq.end_acquire()
                    self.gpu_pdf.accumulate_pdf_eval(gpu_channels, nthreads_per_block=ndaq_per_rep)
        
        return self.gpu_pdf.get_pdf_eval()
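Both eval_pdf variants evaluate the PDF directly at the observed hit times and charges, propagating one photon sample with scatter_first=-1 and a second with scatter_first=1 that is down-weighted by 1/nscatter when acquired. Two details worth noting as written: the DAQ runs in fixed chunks of ndaq_per_rep = 64, so ndaq values below 64 give zero DAQ repetitions (including the default ndaq=1), and setup_pdf_eval receives a literal time_only=True, so the time_only argument has no effect. A usage sketch with assumed names and values:

    # Hypothetical call: ndaq is consumed in chunks of 64, so use a multiple
    # of ndaq_per_rep; all other values here are illustrative assumptions.
    result = sim.eval_pdf(event_channels, events,
                          min_twidth=0.5, trange=(-50.0, 150.0),
                          min_qwidth=0.25, qrange=(-0.5, 19.5),
                          min_bin_content=50, nreps=4, ndaq=128, nscatter=2)
    # Per the docstring, the result pairs per-channel hit counts with the PDF
    # probability densities evaluated at the observed hits.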