def __init__(self,
              config,
              detectorxbins=10,
              detectorybins=10,
              detectorzbins=10,
              infile=None):
     # If passed infile, will automatically read in the calibrated detector mean angles/sigmas
     DetectorResponse.__init__(self, config, detectorxbins, detectorybins,
                               detectorzbins)
     self.means = np.zeros((3, self.npmt_bins))
     self.sigmas = np.zeros(self.npmt_bins)
     if infile is not None:
         logger.info('Creating detector response / calibration with: %s' %
                     infile)
         if infile.endswith('.h5'):
             try:
                 self.read_from_hdf5(infile)
             except IOError as error:
                 logger.warning(
                     'No calibration file found %s.  Continuing uncalibrated'
                     % infile)
                 return
             if self.config.uuid != self.config_in_cal_file.uuid:
                 logger.critical(
                     'UUID from calibration file does not match configuration: %s %s'
                     % (self.config.uuid, self.config_in_cal_file.uuid))
                 exit(-1)
                 # TODO: Raise an exception
             else:
                 logger.info('Calibration file UUID matches')
         else:
             self.read_from_ROOT(infile)
     else:
         logger.warning(
             'No calibration file specified.  Continuing uncalibrated')
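
# Hedged usage sketch (not part of the original source): the class name is
# assumed from the DetectorResponseGaussAngle call in the _calibrate example
# later in this file.
def _demo_load_calibration(config, calibration_file):
    # Passing infile triggers read_from_hdf5 (.h5) or read_from_ROOT (.root)
    return DetectorResponseGaussAngle(config, infile=calibration_file)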
Example #2
def print_tracks(tracks, count):
    logger.info('Total track count: %d' % len(tracks))
    for index, track in enumerate(tracks):
        if index >= count:
            break
        logger.info('Track %d: %s, %s, %f, norm: %f' %
                    (index, str(track[0]), str(track[1]), track[2],
                     np.linalg.norm(track[0])))
Example #3
    def save_configuration(self, config):
        global _config_list

        conf_name = config.config_name
        if conf_name in _config_list:
            logger.info('Replacing configuration: ' + conf_name)
        _config_list[conf_name] = config
        self._save_config_list()
        logger.info('Configuration saved: ' + conf_name)
Example #4
def AVF_analyze_event(analyzer, event, debug=False):
    sig_cone = 0.01
    lens_dia = None
    n_ph = 0

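    # min_tracks, chiC, temps, and tol are assumed to be module-level AVF
    # tuning parameters in the original source; they are not defined in this
    # snippet.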
    vtcs = analyzer.analyze_one_event_AVF(event, sig_cone, n_ph, min_tracks,
                                          chiC, temps, tol, debug, lens_dia)
    logger.info('Vertices: ' + str(vtcs))
    return vtcs
Example #5
def plot_tracks_from_endpoints(begin_pos,
                               end_pos,
                               pts=None,
                               highlight_pt=None,
                               path=None,
                               show=True,
                               skip_interval=50,
                               plot_title="Tracks"):
    # Returns a 3D plot of tracks (a Tracks object), as lines extending from their
    # PMT hit position to the inscribed diameter of the detector.
    # If pts is not None, will also draw them (should be a (3,n) numpy array).
    # If highlight_pt exists, it will be colored differently.
    # If path exists, a path will be drawn between its points (should be shape (3,n)).

    hit_pos = end_pos[0::skip_interval].T
    source_pos = begin_pos[0::skip_interval].T

    logger.info('Plotting %d tracks' % len(hit_pos[0]))

    xs = np.vstack((hit_pos[0, :], source_pos[0, :]))
    ys = np.vstack((hit_pos[1, :], source_pos[1, :]))
    zs = np.vstack((hit_pos[2, :], source_pos[2, :]))

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Draw track hit positions
    ax.scatter(hit_pos[0, :], hit_pos[1, :], hit_pos[2, :], color='red')
    # Draw tracks as lines
    for ii in range(len(hit_pos[0])):
        ax.plot(xs[:, ii], ys[:, ii], zs[:, ii], color='red')

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.title(plot_title)

    # Draw pts
    if pts is not None:
        ax.scatter(pts[0, :], pts[1, :], pts[2, :], color='blue')

    # Draw highlight_pt, larger and different color
    if highlight_pt is not None:
        ax.scatter(highlight_pt[0],
                   highlight_pt[1],
                   highlight_pt[2],
                   color='green',
                   s=50)

    # Draw path between points in path
    if path is not None:
        ax.plot(path[0, :], path[1, :], path[2, :], color='blue')
        plt.title('Vertex position record')

    if show:
        plt.show()

    return fig
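
# Hedged usage sketch (not part of the original source): shapes are assumed
# from the slicing above, i.e. begin_pos and end_pos are (n, 3) arrays.
def _demo_plot_tracks():
    n = 2000
    begin_pos = np.random.uniform(-1000., 1000., (n, 3))  # photon origins
    end_pos = np.random.uniform(-1000., 1000., (n, 3))    # PMT hit positions
    return plot_tracks_from_endpoints(begin_pos, end_pos,
                                      skip_interval=100, show=False)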
Example #6
    def __init__(self):
        global _config_list, _configs_pickle_file

        if _config_list is None:
            try:
                with open(_configs_pickle_file, 'rb') as f:
                    _config_list = pickle.load(f)
                logger.info('Loaded config list: %s' % _configs_pickle_file)
            except IOError:
                _config_list = {}
                self._save_config_list()
Example #7
def sph_scatter(sample_count, in_shell, out_shell):
    logger.info('sph_scatter shell radii: ' + str(in_shell) + ' ' +
                str(out_shell))
    loc = np.random.uniform(-out_shell, out_shell, (sample_count, 3))
    while len(
            loc[(np.linalg.norm(loc, axis=1) > in_shell)
                & (np.linalg.norm(loc, axis=1) <= out_shell)]) != sample_count:
        bl_idx = np.logical_not((np.linalg.norm(loc, axis=1) > in_shell)
                                & (np.linalg.norm(loc, axis=1) <= out_shell))
        smpl = sum(bl_idx)
        loc[bl_idx] = np.random.uniform(-out_shell, out_shell, (smpl, 3))
    return loc
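
# Hedged sanity check (not part of the original source): every sampled point
# should land inside the spherical shell (in_shell, out_shell].
def _demo_sph_scatter():
    pts = sph_scatter(1000, in_shell=100., out_shell=500.)
    r = np.linalg.norm(pts, axis=1)
    assert pts.shape == (1000, 3)
    assert ((r > 100.) & (r <= 500.)).all()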
Example #8
def assign_photons(npmt_bins, n_det, pmt_bins):
    start = time.time()
    pmt_photons = np.empty(npmt_bins, dtype=list)
    for photon in range(n_det):
        if photon % 1000000 == 0:
            logger.info("Photon " + str(photon) + " of " + str(n_det) + ': ' +
                        str(time.time() - start))
        pmt = int(pmt_bins[photon])
        if pmt_photons[pmt] is None:
            pmt_photons[pmt] = [photon]
        else:
            pmt_photons[pmt] += [photon]
    return pmt_photons
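
# Hedged alternative sketch (not part of the original source): the same
# photon-to-PMT grouping can be built without a per-photon Python loop by
# stable-sorting the bin indices once.
def assign_photons_sorted(npmt_bins, n_det, pmt_bins):
    order = np.argsort(pmt_bins[:n_det], kind='stable')
    sorted_bins = pmt_bins[:n_det][order]
    # boundaries[i]:boundaries[i+1] spans the photons that hit PMT bin i
    boundaries = np.searchsorted(sorted_bins, np.arange(npmt_bins + 1))
    return [order[boundaries[i]:boundaries[i + 1]].tolist() or None
            for i in range(npmt_bins)]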
Example #9
    def read_from_ROOT(self, filename):
        from ShortIO.root_short import GaussAngleRootReader

        # Read the means and sigmas from a ROOT file
        logger.info('Loading root calibration file: %s' % filename)
        self.is_calibrated = True
        reader = GaussAngleRootReader(filename)
        for bin_ind, mean, sigma in reader:
            self.means[:, bin_ind] = mean
            self.sigmas[bin_ind] = sigma
            if np.isnan(sigma):
                print "Nan read in for bin index " + str(bin_ind)
        logger.info('Last bin_index: %d' % bin_ind)
Example #10
def _calibrate(config,
               photons_file,
               detresname,
               detxbins=10,
               detybins=10,
               detzbins=10,
               method="PDF",
               nevents=-1,
               datadir="",
               fast_calibration=True):
    logger.info('Calibrating with: ' + datadir + photons_file)
    if method == "PDF":
        dr = DetectorResponsePDF(
            config, detxbins, detybins,
            detzbins)  # Do we need to continue to carry this?
    elif method == "GaussAngle":
        dr = DetectorResponseGaussAngle(config, detxbins, detybins, detzbins)
    else:
        logger.warning('Warning: using generic DetectorResponse base class.')
        dr = DetectorResponse(config)
    dr.calibrate(datadir + photons_file,
                 datadir,
                 nevents,
                 fast_calibration=fast_calibration)
    logger.info(
        "=== Detector analysis calibration complete.  Writing calibration file"
    )

    if USE_ROOT:
        # In this case write both hdf5 and ROOT files
        dr.write_to_ROOT(datadir + detresname + '.root')

    # Config dict is just included for human readability (currently)
    detector_data = {
        'config': dr.config,
        'config_dict': vars(dr.config),
        'means': dr.means,
        'sigmas': dr.sigmas
    }
    dd.io.save(datadir + detresname + '.h5', detector_data)
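
# Hedged read-back sketch (not part of the original source): the calibration
# file written above can be reloaded with deepdish.
def _demo_read_calibration(datadir, detresname):
    data = dd.io.load(datadir + detresname + '.h5')
    return data['means'], data['sigmas']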
Example #11
 def write(self, file_name):
     event = {
         'track_tree': self.track_tree,
         'gun': self.gun_specs,
         'config_name': self.config_name,
         'simulation_params': self.simulation_params
     }
     if self.config_name is not None:
         event['config'] = detectorconfig.get_detector_config(
             self.config_name)
     if self.photons is not None:
         event['photons'] = self.photons
     if self.full_event is not None:
         event['full_event'] = self.full_event
     event['tracks'] = self.tracks
     if hasattr(self.tracks, 'hit_pos'):
          # Note: these are the centers of the lenses that the photons hit
          event['hit_pos'] = self.tracks.hit_pos
         event['means'] = self.tracks.means
         event['sigmas'] = self.tracks.sigmas
     logger.info('Writing deepdish file: ' + file_name)
     dd.io.save(file_name, event)
Example #12
    @classmethod
    def load_from_file(cls, file_name):
        event = dd.io.load(file_name)

        config_name = event['config_name']
        gun_specs = event['gun']
        track_tree = event['track_tree']
        tracks = event['tracks']
        photons = event['photons']
        simulation_params = event.get('simulation_params')
        logger.info('Photon count: ' + str(len(photons)))

        event_file = cls(config_name,
                         gun_specs,
                         track_tree,
                         tracks,
                         photons,
                         simulation_params=simulation_params)
        event_file.full_event = event['full_event']

        # Preserve the whole thing in case we need access to 'hit_pos', 'means', 'sigmas' (for compatibility with the original HDF5 format)
        event_file.complete = event
        return event_file
Example #13
def AVF_analyze_tracks(analyzer, tracks, debug=False):
    vtcs = analyzer.AVF(tracks, min_tracks, chiC, temps, tol, debug)
    logger.info('Vertices: ' + str(vtcs))
    return vtcs
Example #14
def load_or_build_detector(config,
                           detector_material,
                           g4_detector_parameters,
                           force_build=False):
    configname = config.config_name
    filename_base = paths.detector_config_path + configname
    if not os.path.exists(paths.detector_config_path):
        os.makedirs(paths.detector_config_path)

    kabamland = None
    # How to ensure the material and detector parameters are correct??
    if not force_build:
        try:
            detector_config = dd.io.load(filename_base + '.h5')
            kabamland = detector_config['detector']
            logger.info("** Loaded HDF5 (deepdish) detector configuration: " +
                        configname)
        except IOError as error:  # Will dd throw an exception?
            try:
                with open(filename_base + '.pickle', 'rb') as pickle_file:
                    kabamland = pickle.load(pickle_file)
                    logger.info("** Loaded pickle detector configuration: " +
                                configname)
            except IOError as error:
                pass
    if kabamland is not None:
        config_has_g4_dp = hasattr(
            kabamland, 'g4_detector_parameters'
        ) and kabamland.g4_detector_parameters is not None
        config_has_g4_dm = hasattr(
            kabamland,
            'detector_material') and kabamland.detector_material is not None
        if g4_detector_parameters is not None:
            logger.info('*** Using Geant4 detector parameters specified' +
                        (' - replacement' if config_has_g4_dp else '') +
                        ' ***')
            kabamland.g4_detector_parameters = g4_detector_parameters
        elif config_has_g4_dp:
            logger.info(
                '*** Using Geant4 detector parameters found in loaded file ***'
            )
        else:
            logger.info('*** No Geant4 detector parameters found at all ***')

        if detector_material is not None:
            logger.info('*** Using Geant4 detector material specified' +
                        (' - replacement' if config_has_g4_dm else '') +
                        ' ***')
            kabamland.detector_material = detector_material
        elif config_has_g4_dm:
            logger.info(
                '*** Using Geant4 detector material found in loaded file ***')
        else:
            logger.info('*** No Geant4 detector material found at all ***')
    else:
        from chroma.loader import load_bvh  # Requires CUDA so only import it when necessary

        logger.info("** Building detector configuration: " + configname)
        kabamland = Detector(lm.create_scintillation_material(),
                             g4_detector_parameters=g4_detector_parameters)
        kbl2.build_kabamland(kabamland, config)
        # view(kabamland)
        kabamland.flatten()
        kabamland.bvh = load_bvh(kabamland,
                                 bvh_name=config.config_name,
                                 read_bvh_cache=(not force_build))
        '''
        try:
            with open(filename_base+'.pickle','wb') as pickle_file:
                pickle.dump(kabamland, pickle_file)
        except IOError as error:
            logger.info("Error writing pickle file: " + filename_base+'.pickle')
        '''

        # Write h5 file with configuration data structure
        logger.info('Saving h5 detector configuration.  UUID: %s' %
                    config.uuid)
        '''   # This was created to minimize what is saved from the Detector object.  But for simplicity, we are currently pickling the whole object.
        detector_dict = {
            'detector_material' : kabamland.detector_material,
            'solids' : kabamland.solids,
            'solid_rotations' : kabamland.solid_rotations,
            'solid_displacements' : kabamland.solid_displacements,
            'bvh' : kabamland.bvh,
            'g4_detector_parameters' : kabamland.g4_detector_parameters,
            'solid_id_to_channel_index' : kabamland.solid_id_to_channel_index,
            'channel_index_to_solid_id' : kabamland.channel_index_to_solid_id,
            'channel_index_to_channel_id' : kabamland.channel_index_to_channel_id,
            'channel_id_to_channel_index' : kabamland.channel_id_to_channel_index,
            'time_cdf' : kabamland.time_cdf,
            'charge_cdf' : kabamland.charge_cdf
        }
        '''
        # TODO: Saving the whole dict and the object is redundant
        # TODO: Also, saving all of kabamland vs. just the parameters above adds about 1 Meg to the file size (I think)
        import lenssystem

        ld_name = configname.split('_')[0][2:]
        lens_design = lenssystem.get_lens_sys(ld_name)
        config_data = {
            'detector_config': config,
            'detector_config_dict': vars(config),
            'lens_config_dict': vars(lens_design)
        }
        detector_data = {'config': config_data, 'detector': kabamland}
        dd.io.save(filename_base + '.h5', detector_data)

    return kabamland
Example #15
def simulate_and_calibrate(config,
                           build_only=False,
                           force=False,
                           fast_calibration=True):
    config_name = config.config_name
    if (not force) and os.path.isfile(
            paths.get_calibration_file_name(config_name)):
        logger.info('Found calibration file: %s' %
                    paths.get_calibration_file_name(config_name))
    else:
        logger.info('Failed to find calibration file: ' +
                    paths.get_calibration_file_name(config_name))
        logger.info('==== Step 1: Setting up the detector ====')
        photons_file_base = 'sim-' + config_name + '_100million'
        photons_file_full_path_base = paths.detector_calibration_path + photons_file_base
        if force or (
                not (os.path.exists(photons_file_full_path_base + '.root')
                     or os.path.exists(photons_file_full_path_base + '.h5'))):
            if force:
                logger.info('Forcing detector build')
            logger.info('Starting to load/build: %s' % config_name)
            g4_detector_parameters = G4DetectorParameters(
                orb_radius=7., world_material='G4_Galactic')
            kabamland = utilities.load_or_build_detector(
                config,
                lm.create_scintillation_material(),
                g4_detector_parameters=g4_detector_parameters,
                force_build=force)
            logger.warning('=== Detector was loaded/built')
            if build_only:
                return
            logger.info('==== Simulating photons: %s  ====' %
                        photons_file_base)
            _full_detector_simulation(config,
                                      kabamland,
                                      100000,
                                      photons_file_base,
                                      datadir=paths.detector_calibration_path)
            logger.warning('==== Simulation complete')
            simulation_file = photons_file_base + '.h5'

        elif os.path.exists(
                photons_file_full_path_base + '.h5'
        ):  # TODO: The double if's and constantly adding extensions needs to be reworked
            simulation_file = photons_file_base + '.h5'
        else:  # Fall back to root
            simulation_file = photons_file_base + '.root'
        logger.warning('==== Found/created photons file: %s ====' %
                       simulation_file)

        if not build_only:
            logger.info("==== Step 2: Calibrating  ====")
            _calibrate(
                config,
                simulation_file,
                paths.get_calibration_file_name_base_without_path(config_name),
                method="GaussAngle",
                nevents=10000,
                datadir=paths.detector_calibration_path,
                fast_calibration=fast_calibration)
            #os.remove(photons_file)  # Would need to remove both
            logger.warning(
                '==== Calibration complete: %s %s ====' %
                (config_name, 'fast' if fast_calibration else 'slow'))
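
# Hedged usage sketch (not part of the original source; the config lookup
# follows get_detector_config usage elsewhere in this file):
# config = detectorconfig.get_detector_config(config_name)
# simulate_and_calibrate(config, fast_calibration=True)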
Example #16
    def __init__(self,
                 config,
                 detectorxbins=10,
                 detectorybins=10,
                 detectorzbins=10):
        # TODO: Duplicates a lot of stuff in the config
        self.config = config  # To enable saving configuration with the calibration file
        self.configname = config.config_name  # Adding this for intermediate calibration file writing
        self.is_calibrated = False
        self.lns_rad = config.half_EPD / config.EPD_ratio
        self.detectorxbins = detectorxbins
        self.detectorybins = detectorybins
        self.detectorzbins = detectorzbins
        #self.edge_length, self.facecoords, self.direction, self.axis, self.angle, self.spin_angle = return_values(config.edge_length, config.base)
        self.pmtxbins = config.pmtxbins
        self.pmtybins = config.pmtybins
        self.n_lens_sys = config.lens_count  # Number of lens systems per face - TODO: not true anymore
        self.detector_r = config.detector_r
        self.nsteps = config.ring_count
        #self.n_triangles_per_surf = int(2*self.nsteps*int((self.nsteps-2)/2.))

        #self.n_pmts_per_surf = int(self.n_triangles_per_surf/2.)
        #if not self.detector_r:
        #    self.npmt_bins = 20*self.pmtxbins*self.pmtybins
        #else:
        #    self.npmt_bins = 20*self.n_lens_sys*self.n_pmts_per_surf # One curved detecting surf for each lens system

        self.diameter_ratio = config.diameter_ratio
        self.thickness_ratio = config.thickness_ratio
        ##changed
        self.focal_length = config.focal_length

        ##end changed
        #self.pmt_side_length = np.sqrt(3)*(3-np.sqrt(5))*self.focal_length
        self.inscribed_radius = config.detector_radius
        #self.rotation_matrices = self.build_rotation_matrices()
        #self.inverse_rotation_matrices = np.linalg.inv(self.rotation_matrices)
        #self.displacement_matrix = self.build_displacement_matrix()
        #self.inverse_rotated_displacement_matrix = self.build_inverse_rotated_displacement_matrix()
        #self.lens_inverse_rotated_displacement_matrix = self.build_lensplane_inverse_rotated_displacement_matrix()
        #new properties for curved surface detectors

        # Comment this out to allow access to old calibration files
        self.triangle_centers, self.n_triangles_per_surf, self.ring = get_curved_surf_triangle_centers(
            config.vtx, self.lns_rad, self.detector_r, self.focal_length,
            self.nsteps, config.base_pixels)
        self.triangle_centers_tree = spatial.cKDTree(self.triangle_centers)
        self.n_pmts_per_surf = int(self.n_triangles_per_surf / 2.)

        if not self.detector_r:
            self.npmt_bins = 20 * self.pmtxbins * self.pmtybins
        else:
            self.npmt_bins = self.n_lens_sys * self.n_pmts_per_surf  # One curved detecting surf for each lens system

        # Comment this out to allow access to old calibration files
        self.lens_centers = get_lens_triangle_centers(
            config.vtx,
            self.lns_rad,
            config.diameter_ratio,
            config.thickness_ratio,
            config.half_EPD,
            config.blockers,
            blocker_thickness_ratio=config.blocker_thickness_ratio,
            light_confinement=config.light_confinement,
            focal_length=config.focal_length,
            lens_system_name=config.lens_system_name)
        self.lens_rad = config.half_EPD

        #self.calc1 = self.pmtxbins/self.pmt_side_length
        #self.calc2 = self.pmtxbins/2.0
        #self.calc3 = 2*self.pmtybins/(np.sqrt(3)*self.pmt_side_length)
        #self.calc4 = self.pmtybins/3.0
        #self.calc5 = self.pmtxbins*self.pmtybins
        self.c_rings = np.cumsum(self.ring)
        self.c_rings_rolled = np.roll(self.c_rings, 1)
        self.c_rings_rolled[0] = 0

        logger.info('Detector rings: %s, cumulative pixels in rings: %s' %
                    (str(self.ring), str(self.c_rings)))
Example #17
# Driver for generating new hdf5/dd event files and diagnosing AVF algorithm
if __name__ == '__main__':
    import DetectorResponseGaussAngle
    import EventAnalyzer

    parser = argparse.ArgumentParser()
    parser.add_argument('h5_file', help='Event HDF5 file')
    args = parser.parse_args()

    event = DIEventFile.load_from_file(args.h5_file)
    title = str(event.gun_specs['energy']) + ' MeV ' + str(
        event.gun_specs['particle'])
    vertices = None
    if event.tracks is not None:
        logger.info('Track count: ' + str(len(event.tracks)))
        #event.tracks.sigmas.fill(0.01)  # TODO: Temporary hack because I think we forced 0.0001 into the tracks in the test file.  Sigmas too small really screw up the machine!!
        print_tracks(event.tracks, 20)

        calibrated_simulation = True
        if event.simulation_params is not None and 'calibrated' in event.simulation_params:
            calibrated_simulation = event.simulation_params['calibrated']
            logger.info(
                '=== Simulation event used calibrated detector: %s ===' %
                str(calibrated_simulation))
        else:
            logger.info('=== No calibration flag in event file ===')
            logger.info('Photons in file: %d' %
                        len(event.full_event.photons_end))

        config = detectorconfig.get_detector_config(
            event.config_name)  # assumed argument, following the get_detector_config usage above
Example #18
def fire_g4_particles(sample_count,
                      config,
                      particle,
                      energy,
                      inner_radius,
                      outer_radius,
                      h5_file,
                      location=None,
                      momentum=None,
                      di_file_base=None,
                      qe=None):
    from chroma.generator import vertex

    config_name = config.config_name
    sim, analyzer = sim_setup(config,
                              paths.get_calibration_file_name(config_name),
                              useGeant4=True,
                              geant4_processes=1,
                              no_gpu=False)
    #analyzer.det_res.is_calibrated=False    # Temporary to test AVF with actual photon angles vs. calibration angles

    logger.info('Configuration:\t%s' % config_name)
    logger.info('Particle:\t\t%s ' % particle)
    logger.info('Energy:\t\t%d' % energy)
    logger.info('Sim count:\t\t%d' % sample_count)
    logger.info('File:\t\t%s' % h5_file)

    if location is None:  # Location is a flag
        loc_array = sph_scatter(sample_count, inner_radius * 1000,
                                outer_radius * 1000)
    else:
        loc_array = [location]

    with h5py.File(h5_file, 'w') as f:
        first = True
        logger.info('Running locations:\t%d' % len(loc_array))
        for i, lg in enumerate(loc_array):
            logger.info('Location:\t\t%s' % str(lg))
            if location is None:
                gun = vertex.particle_gun([particle], vertex.constant(lg),
                                          vertex.isotropic(),
                                          vertex.flat(
                                              float(energy) * 0.999,
                                              float(energy) * 1.001))
            else:
                gun = vertex.particle_gun(
                    [particle], vertex.constant(lg), vertex.constant(momentum),
                    vertex.constant(energy)
                )  # TODO: AWS seems to require: vertex.constant(np.array(momentum))

            events = sim.simulate(gun,
                                  keep_photons_beg=True,
                                  keep_photons_end=True,
                                  run_daq=False,
                                  max_steps=100)
            for ev in events:  # Note: There is really only ever one event because we enumerate loc_array above
                vert = ev.photons_beg.pos
                tracks = analyzer.generate_tracks(ev, qe=qe)
                write_h5_reverse_track_file_event(f, vert, tracks, first)
                first = False

                #vertices = utilities.AVF_analyze_event(analyzer, ev)
                #utilities.plot_vertices(ev.photons_beg.track_tree, 'AVF plot', reconstructed_vertices=vertices)
                if di_file_base is not None:
                    gun_specs = build_gun_specs(particle, lg, None, energy)
                    di_file = DIEventFile(config_name, gun_specs,
                                          ev.photons_beg.track_tree, tracks,
                                          ev.photons_beg, ev)
                    di_file.write(di_file_base + '_' + str(i) + '.h5')

            logger.info('Photons detected:\t%s' % str(tracks.sigmas.shape[0]))
            logger.info('============')
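
# Hedged usage sketch (not part of the original source; particle, energy, and
# radii are illustrative values only):
# config = detectorconfig.get_detector_config(config_name)
# fire_g4_particles(10, config, 'e-', 2., inner_radius=0., outer_radius=1.,
#                   h5_file='e-_2MeV_tracks.h5')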
Example #19
def plot_vertices(track_tree,
                  title,
                  with_electrons=True,
                  file_name=None,
                  reconstructed_vertices=None,
                  reconstructed_vertices2=None):
    particles = {}
    energies = {}
    for key, value in track_tree.items():
        if 'particle' in value:
            particle = value['particle']
            if particle not in particles:
                particles[particle] = []
                energies[particle] = []
            particles[particle].append(value['position'])
            energies[particle].append(100. * value['energy'])

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    for key, value in particles.items():
        if with_electrons or key != 'e-':
            the_array = np.array(value)
            #ax.plot(the_array[:,0], the_array[:,1], the_array[:,2], '.', markersize=5.0)
            ax.scatter(the_array[:, 0],
                       the_array[:, 1],
                       the_array[:, 2],
                       marker='o',
                       s=energies[key],
                       label=key)
    if reconstructed_vertices is not None:
        vertex_positions = []
        for v in reconstructed_vertices:
            logger.info('Vertex position: %s' % v.pos)
            vertex_positions.append(np.asarray(v.pos))
        vp = np.asarray(vertex_positions)
        logger.info('AVF positions: ' + str(vp))
        ax.scatter(vp[:, 0],
                   vp[:, 1],
                   vp[:, 2],
                   marker=(6, 1, 0),
                   s=100.,
                   color='gray',
                   label='AVF')
    # Optionally plot reconstructed vertices as well
    if reconstructed_vertices2 is not None:
        vertex_positions = []
        for v in reconstructed_vertices2:
            logger.info('Vertex position: %s' % v.pos)
            vertex_positions.append(np.asarray(v.pos))
        vp = np.asarray(vertex_positions)
        logger.info('AVF 2 positions: ' + str(vp))
        ax.scatter(vp[:, 0],
                   vp[:, 1],
                   vp[:, 2],
                   marker=(6, 1, 0),
                   s=100.,
                   color='black',
                   label='AVF 2')

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_title(title)

    plt.legend(loc=2)  # See https://pythonspot.com/3d-scatterplot/

    # See: http://fredborg-braedstrup.dk/blog/2014/10/10/saving-mpl-figures-using-pickle
    if file_name is not None:
        with open(file_name, 'wb') as f:
            pickle.dump(fig, f)
    plt.show()
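
# Hedged companion sketch (not part of the original source): a figure pickled
# above can be restored later, per the blog post linked above.
def _demo_reload_figure(file_name):
    with open(file_name, 'rb') as f:
        fig = pickle.load(f)
    fig.show()
    return fig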
Example #20
def _full_detector_simulation(config, kabamland, amount, simname, datadir=""):
    # simulates 1000*amount photons uniformly spread throughout a sphere whose radius is the inscribed radius of the icosahedron.
    # Note that viewing may crash if there are too many lenses. (try using configview)
    from chroma.sim import Simulation  # Requires CUDA, so only import when necessary

    config_name = config.config_name
    file_name_base = datadir + simname
    if USE_ROOT:
        from ShortIO.root_short import ShortRootWriter
        f = ShortRootWriter(file_name_base + '.root')

    # SHERLOCK: Have to set the seed because Sherlock compute machines blow up if we use chroma's algorithm!!!!
    # sim = Simulation(kabamland, geant4_processes=0, seed=65432)  # For now, does not take advantage of multiple cores  # TODO: use sim_setup()?
    sim = Simulation(
        kabamland, geant4_processes=0
    )  # For now, does not take advantage of multiple cores  # TODO: use sim_setup()?
    with h5py.File(file_name_base + '.h5', 'w') as h5_file:
        # Total photons will be LOOP_COUNT * EVENT_COUNT * amount
        LOOP_COUNT = 100
        EVENT_COUNT = 10

        # Setup to write the hdf5 file incrementally
        # Can't use deepdish as it seems to require a single write which takes up too much memory
        start_pos = h5_file.create_dataset('photons_start',
                                           shape=(LOOP_COUNT * EVENT_COUNT,
                                                  amount, 3),
                                           dtype=np.float32,
                                           chunks=True)
        end_pos = h5_file.create_dataset('photons_stop',
                                         shape=(LOOP_COUNT * EVENT_COUNT,
                                                amount, 3),
                                         dtype=np.float32,
                                         chunks=True)
        photon_flags = h5_file.create_dataset('photon_flags',
                                              shape=(
                                                  LOOP_COUNT * EVENT_COUNT,
                                                  amount,
                                              ),
                                              dtype=np.uint32,
                                              chunks=True)

        # Store the UUID and name to enable matching of the configuration, calibration, and simulation files
        h5_file.attrs['config_name'] = config_name
        h5_file.attrs['config_UUID'] = str(config.uuid)

        process = psutil.Process(os.getpid())
        logger.info('Memory size: %d MB' %
                    (process.memory_info().rss // 1000000))
        for j in range(LOOP_COUNT):
            logger.info('%d of %d event sets' % (j, LOOP_COUNT))
            ev_index = 0
            sim_events = [
                kb.uniform_photons(config.detector_radius, amount)
                for i in range(EVENT_COUNT)
            ]
            for ev in sim.simulate(sim_events,
                                   keep_photons_beg=True,
                                   keep_photons_end=True,
                                   run_daq=False,
                                   max_steps=100):
                start_pos[(j * EVENT_COUNT) + ev_index] = ev.photons_beg.pos
                end_pos[(j * EVENT_COUNT) + ev_index] = ev.photons_end.pos
                photon_flags[(j * EVENT_COUNT) +
                             ev_index] = ev.photons_end.flags
                ev_index += 1

                if USE_ROOT:
                    # In this case, write both hdf5 and ROOT simulation files
                    f.write_event(ev)
            logger.info('Memory size: %d MB' %
                        (process.memory_info().rss // 1000000))

    if USE_ROOT:
        f.close()
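
# Hedged read-back sketch (not part of the original source): the datasets and
# attributes written above can be read back incrementally with h5py.
def _demo_read_simulation(file_name):
    with h5py.File(file_name, 'r') as h5_file:
        logger.info('Config: %s, UUID: %s' % (h5_file.attrs['config_name'],
                                              h5_file.attrs['config_UUID']))
        return h5_file['photons_start'][0]  # (amount, 3) positions of event 0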
Example #21
    def calibrate(self,
                  simname,
                  directory=".",
                  nevents=-1,
                  fast_calibration=False):
        # Use with a simulation file 'simname' to calibrate the detector
        # Creates a list of mean angles and their uncertainties (sigma for
        # a cone of unit length), one for each PMT
        # There are 1000 events in a typical simulation.
        # Uses all photons hitting a given PMT at once (better estimate of sigma,
        # but may run out of memory in some cases).
        # Will not calibrate PMTs with <n_min hits
        logger.info('Fast calibration: %s' % str(fast_calibration))
        self.is_calibrated = True
        start_time = time.time()

        base_hits_file_name = self.configname + '-hits'
        pickle_name = base_hits_file_name + '.pickle'
        hit_file_exists = False
        n_min = 10  # Do not calibrate a PMT if <n_min photons hit it
        try:
            logger.info('Attempting to load pickle hits file: %s%s' %
                        (directory, pickle_name))
            # TODO: This generally won't do anything because we no longer write this file
            with open(directory + pickle_name, 'rb') as inf:
                pmt_hits = pickle.load(inf)
            logger.info('Hit map pickle file loaded: ' + pickle_name)
            pmt_bins = pmt_hits['pmt_bins']
            end_direction_array = pmt_hits['end_direction_array']
            n_det = len(pmt_bins)
            hit_file_exists = True
        except IOError as error:
            # Assume file not found
            logger.info('Hit map pickle file not found.  Creating: ' +
                        base_hits_file_name)

            using_h5 = False
            if simname.endswith('.h5'):
                using_h5 = True
                ev_file = dd.io.load(simname)
                # TODO: Check the UUID!!
                events_in_file = len(ev_file['photons_start'])
            else:
                from ShortIO.root_short import GaussAngleRootWriter, GaussAngleRootReader, ShortRootReader
                reader = ShortRootReader(simname)
                events_in_file = len(reader)
            logger.info('Loaded simulation file: %s' % simname)
            logger.info('Simulation event count: %d' % events_in_file)

            if nevents < 1:
                nevents = events_in_file

            max_storage = min(
                nevents * 1000000, 120000000
            )  #600M is too much, 400M is OK (for np.float32; using 300M)
            end_direction_array = np.empty((max_storage, 3), dtype=np.float32)
            pmt_bins = np.empty(max_storage, dtype=np.int64)
            n_det = 0

            # Loop through events, store for each photon the index of the PMT it hit (pmt_bins)
            # and the direction pointing back to its origin (end_direction_array)
            loops = 0
            event_source = ev_file['photons_start'] if using_h5 else reader
            for index, ev_proxy in enumerate(
                    event_source):  # Not sure if we can enumerate a reader????
                # TODO: This needs to be cleaned up
                if using_h5:
                    photons_beg = Photons(ev_proxy, [], [], [])
                    photons_end = Photons(ev_file['photons_stop'][index], [],
                                          [], [],
                                          flags=ev_file['photon_flags'][index])
                else:
                    photons_beg = ev_proxy.photons_beg
                    photons_end = ev_proxy.photons_end

                loops += 1
                if loops > nevents:
                    break

                if loops % 100 == 0:
                    logger.info("Event " + str(loops) + " of " + str(nevents))
                    logger.handlers[0].flush()

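                # Bit 2 of the chroma photon flag word is SURFACE_DETECT;
                # the commented-out block below tests bits 5 and 6, the
                # diffuse and specular reflection flags.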
                detected = (photons_end.flags & (0x1 << 2)).astype(bool)
                '''
                reflected_diffuse = (ev.photons_end.flags & (0x1 << 5)).astype(bool)
                reflected_specular = (ev.photons_end.flags & (0x1 << 6)).astype(bool)
                logger.info("Total detected: " + str(sum(detected * 1)))
                logger.info("Total reflected: " + str(sum(reflected_diffuse * 1) + sum(reflected_specular * 1)))
                good_photons = detected & np.logical_not(reflected_diffuse) & np.logical_not(reflected_specular)
                logger.info("Total detected and not reflected: " + str(sum(good_photons * 1)))
                '''
                if fast_calibration:
                    ending_photons, length = self._find_photons_for_pmt(
                        photons_beg.pos, photons_end.pos, detected,
                        end_direction_array, n_det, max_storage)
                    if length is None:
                        break
                    pmt_b = self.find_pmt_bin_array(ending_photons)
                else:
                    beginning_photons = photons_beg.pos[
                        detected]  # Include reflected photons
                    ending_photons = photons_end.pos[detected]
                    length = np.shape(ending_photons)[0]
                    pmt_b = self.find_pmt_bin_array(ending_photons)
                    end_point = self.lens_centers[pmt_b // self.n_pmts_per_surf]
                    end_dir = normalize(end_point - beginning_photons)
                    # if end_direction_array is None:
                    #     end_direction_array = end_dir
                    # else:
                    #     end_direction_array = np.vstack((end_direction_array, end_dir))
                    #end_direction_array.append(end_dir)
                    if n_det + length > max_storage:
                        logger.info(
                            'Too many photons to store in memory; not reading any further events.'
                        )
                        break
                    end_direction_array[n_det:(n_det + length), :] = end_dir
                    # if pmt_bins is None:
                    #     pmt_bins = pmt_b
                    # else:
                    #     pmt_bins = np.hstack((pmt_bins, pmt_b))
                    #pmt_bins.append(pmt_b)
                pmt_bins[n_det:(n_det + length)] = pmt_b
                n_det += length
                if loops % 100 == 0:
                    logger.info('Photons detected so far: ' +
                                str(n_det + length))
                    # logger.info('Sample pmt bins: ' + str(pmt_bins[n_det:(n_det+length)]))
                    logger.info("Time: " + str(time.time() - start_time))

        total_means = np.zeros((self.npmt_bins, 3))
        total_variances = np.zeros((self.npmt_bins))
        total_u_minus_v = np.zeros((self.npmt_bins))
        amount_of_hits = np.zeros((self.npmt_bins))

        end_direction_array.resize((n_det, 3))
        logger.info("Time: " + str(time.time() - start_time))
        pmt_bins.resize(n_det)

        if not hit_file_exists:
            # TODO: No longer writing the pickle file
            # Write the pickle hits file regardless of whether using fast_calibration or not
            #pmt_hits = {'pmt_bins': pmt_bins, 'end_direction_array': end_direction_array}
            #with open(directory+pickle_name, 'wb') as outf:
            #    pickle.dump(pmt_hits, outf)
            with h5py.File(directory + base_hits_file_name + '.h5',
                           'w') as h5file:
                _ = h5file.create_dataset(
                    'pmt_bins', data=pmt_bins,
                    chunks=True)  # TODO: Should we assign max shape?
                _ = h5file.create_dataset(
                    'end_direction_array',
                    data=end_direction_array,
                    chunks=True)  # TODO: Should we assign max shape?
            logger.info('Hit map file created: %s.h5' % base_hits_file_name)

        logger.info(
            "Finished collecting photons (or loading photon hit list).  Time: "
            + str(time.time() - start_time))

        if fast_calibration:
            bins_base_file_name = self.configname + '-pmt-bins'
            bins_pickle_file = bins_base_file_name + '.pickle'
            try:
                with open(
                        directory + bins_pickle_file, 'rb'
                ) as inf:  # TODO: This generally won't do anything because we no longer write this file
                    pmt_photons = pickle.load(inf)
                logger.info('PMT photon list pickle file loaded: ' +
                            bins_pickle_file)
            except IOError as error:
                start_assign = time.time()
                pmt_photons = assign_photons(self.npmt_bins, n_det, pmt_bins)
                logger.info("assign_photons took: " +
                            str(time.time() - start_assign))
                # TODO: No longer writing the pickle file
                #with open(directory + bins_pickle_file, 'wb') as outf:
                #    pickle.dump(pmt_photons, outf)

                # TODO: Pure H5 does not work.  (Because?)
                #with h5py.File(directory + bins_file + '.h5', 'w') as h5file:
                #    _ = h5file.create_dataset('photon_pmts', data=pmt_photons, chunks=True)   # Should we assign max shape?
                #logger.info('Type: ' + str(type(pmt_photons)) + ' ' + str(type(pmt_photons[0])))
                dd.io.save(directory + bins_base_file_name + '.h5',
                           pmt_photons)
                logger.info('PMT photon list file created: ' +
                            bins_base_file_name + '.h5')

        logger.info("Finished listing photons by pmt.  Time: " +
                    str(time.time() - start_time))

        draw_pmt_ind = -1

        # looping through each pmt in order to save a mean_angle and a variance
        for i in range(self.npmt_bins):
            if i % 10000 == 0:
                logger.info(
                    str(i) + ' out of ' + str(self.npmt_bins) + ' PMTs')
                logger.handlers[0].flush()
                logger.info("Time: " + str(time.time() - start_time))

            if fast_calibration:
                photon_list = pmt_photons[i]
                angles_for_pmt = end_direction_array[photon_list]
                n_angles = len(angles_for_pmt)  # np.shape(angles_for_pmt)[0]
                # skipping pmts with <2 photon hits (in which case the variance will be undefined with ddof=1)
                # also skipping if <n_min photon hits
                if n_angles < 2 or n_angles < n_min:
                    logger.warning('Not enough angles for PMT: %d, %d' %
                                   (i, n_angles))
                    continue

                mean_angle, variance, uvvar = compute_pmt_calibration(
                    angles_for_pmt, n_min)
            else:
                pmt_indices = np.where(pmt_bins == i)[0]
                if np.shape(pmt_indices)[0] == 0:
                    continue
                angles_for_pmt = end_direction_array[pmt_indices]

                n_angles = np.shape(angles_for_pmt)[0]
                #skipping pmts with <2 photon hits (in which case the variance will be undefined with ddof=1)
                #also skipping if <n_min photon hits
                if n_angles < 2 or n_angles < n_min:
                    continue
                mean_angle = normalize(np.mean(angles_for_pmt, axis=0))

                # For each PMT, get a pair of axes which form an
                # orthonormal coordinate system with the PMT mean direction
                u_dir = np.cross(mean_angle, np.array([0, 0, 1]))
                if not (np.dot(u_dir, u_dir) >
                        0):  # In case mean_angle = [0,0,1]
                    u_dir = np.cross(mean_angle, np.array([0, 1, 0]))
                u_dir = normalize(u_dir)
                v_dir = np.cross(mean_angle, u_dir)

                u_proj = np.dot(angles_for_pmt, u_dir)
                u_var = np.var(u_proj, ddof=1)
                v_proj = np.dot(angles_for_pmt, v_dir)
                v_var = np.var(v_proj, ddof=1)
                variance = (u_var + v_var) / 2.

                # Old method, which calculated the variance of the projected
                # norms, even though the mean of the projections wasn't 0 due
                # to the solid angle factor
                norms = np.repeat(1.0, n_angles)
                projection_norms = np.dot(angles_for_pmt, mean_angle)
                orthogonal_complements = np.sqrt(
                    np.maximum(norms**2 - projection_norms**2, 0.))
                #variance = np.var(orthogonal_complements, ddof=1)

                uvvar = u_var - v_var
            try:
                #draw_pmt_ind = None
                draw_pmt_ind = int(draw_pmt_ind)
                '''
                if i == draw_pmt_ind or draw_pmt_ind<0:
                    # Temporary, to visualize histogram of angles, distances
                    #angles = np.arccos(projection_norms)
                    #ang_variance = np.var(angles, ddof=1)
                    #fig1 = plt.figure(figsize=(7.8, 6))
                    #plt.hist(angles, bins=20)
                    #plt.xlabel('Angular Separation to Mean Angle')
                    #plt.ylabel('Counts per bin')
                    #plt.title('Angles Histogram for PMT ' + str(i))
                    ##plt.show()
                    #
                    #fig2 = plt.figure(figsize=(7.8, 6))
                    #plt.hist(angles, bins=20, weights=1./np.sin(angles))
                    #plt.xlabel('Angular Separation to Mean Angle')
                    #plt.ylabel('Counts per solid angle')
                    #plt.title('Angles Histogram for PMT ' + str(i))

                    fig3 = plt.figure(figsize=(7.8, 6))
                    plt.hist(orthogonal_complements, bins=20)
                    plt.xlabel('Normalized Distance to Mean Angle')
                    plt.ylabel('Counts per bin')
                    plt.title('Distances Histogram for PMT ' + str(i))

                    fig4 = plt.figure(figsize=(7.8, 6))
                    plt.hist(u_proj, bins=20)
                    plt.xlabel('U Distance to Mean Angle')
                    plt.ylabel('Counts per bin')
                    plt.title('U Distances Histogram for PMT ' + str(i))

                    fig5 = plt.figure(figsize=(7.8, 6))
                    plt.hist(v_proj, bins=20)
                    plt.xlabel('V Distance to Mean Angle')
                    plt.ylabel('Counts per bin')
                    plt.title('V Distances Histogram for PMT ' + str(i))
                    plt.show()

                    #print "Average projected variance: ", variance
                    #print "Variance of projected 2D norms: ", np.var(orthogonal_complements, ddof=1)
                    draw_pmt_ind = raw_input("Enter index of next PMT to draw; will stop drawing if not a valid PMT index.\n")
                '''
            except ValueError:
                pass
            except TypeError:
                pass

            total_means[i] = mean_angle
            total_variances[i] = variance
            total_u_minus_v[i] = np.abs(uvvar)
            amount_of_hits[i] = n_angles
            if np.isnan(variance):
                print "Nan for PMT " + str(i)
                # nan_ind = np.where(np.isnan(orthogonal_complements))
                # print nan_ind
                # print projection_norms
                # print orthogonal_complements
                # print angles_for_pmt
                # print variance
                # print mean_angle

        # temporary, for debugging:
        n_hits = np.sum(amount_of_hits, axis=0)
        print "Total hits for calibrated PMTs: " + str(n_hits)
        print "PMTs w/ < n_events hits: " + str(
            len(np.where(amount_of_hits < nevents)[0]) * 1.0 / self.npmt_bins)
        print "PMTs w/ < n_min hits: " + str(
            len(np.where(amount_of_hits < n_min)[0]) * 1.0 / self.npmt_bins)
        print "PMTs w/ < 100 hits: " + str(
            len(np.where(amount_of_hits < 100)[0]) * 1.0 / self.npmt_bins)
        print "Mean U-V variance (abs): " + str(np.mean(total_u_minus_v))

        # Store final calibrated values
        self.means = -total_means.astype(np.float32).T
        self.sigmas = np.sqrt(total_variances.astype(np.float32))
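        # The negation appears to flip each stored mean to point from the
        # detector back toward the photon origins; the transpose matches the
        # (3, npmt_bins) layout allocated for self.means in __init__ above.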