Code Example #1
File: test_voxel.py  Project: DeepLearnPhysics/larcv3
def test_Voxel_h_Voxel():
    vox1 = larcv.Voxel()
    vox2 = larcv.Voxel(0, 0.0)
    vox3 = larcv.Voxel(10, 50.0)

    assert (vox1.id() == larcv.kINVALID_VOXELID)
    assert (vox2.id() == 0)
    assert (vox3.id() == 10)

    assert (vox1.value() == larcv.kINVALID_FLOAT)
    assert (vox2.value() == 0.0)
    assert (vox3.value() == 50.)
Code Example #2
File: test_voxel.py  Project: DeepLearnPhysics/larcv3
def test_sparse_tensor_downsample(dimension, pooling):

    # Create image Meta:
    meta = image_meta_factory(dimension)
    meta.set_projection_id(0)
    for dim in range(dimension):
        L = 10.
        N = 128
        meta.set_dimension(dim, L, N)

    if dimension == 2:
        st = larcv.SparseTensor2D()
    if dimension == 3:
        st = larcv.SparseTensor3D()
    st.meta(meta)

    # Get a set of voxels:
    voxel_set_list = data_generator.build_sparse_tensor(1, n_projections=1)
    indexes = voxel_set_list[0][0]['indexes']
    values = voxel_set_list[0][0]['values']
    n_voxels = voxel_set_list[0][0]['n_voxels']
    for j in range(n_voxels):
        if pooling == larcv.kPoolMax:
            # Only use positive values for max pooling.
            # Negative values have different behavior in sparse vs dense
            # max pooling.
            st.emplace(larcv.Voxel(indexes[j], numpy.abs(values[j])), False)
        else:
            st.emplace(larcv.Voxel(indexes[j], values[j]), False)

    # Dense downsampling is tested against skimage elsewhere;
    # here, test sparse downsampling against the dense result.

    compression = 2

    st_dense = st.to_tensor()
    st_dense_compressed = st_dense.compress(compression, pooling).as_array()

    st_compressed = st.compress(compression, pooling)

    st_compressed_dense = st_compressed.dense()
    print(st_dense.as_array())

    # Do some checks:
    assert numpy.abs((st_compressed_dense.sum() - st_dense_compressed.sum()) /
                     st_dense_compressed.sum()) < 1e-6
    max_index = numpy.prod(st_compressed_dense.shape)
    for i in range(50):
        index = numpy.random.randint(0, max_index)
        assert numpy.abs(
            st_compressed_dense.take(index) -
            st_dense_compressed.take(index)) < 1e-4
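
A short illustration of the max-pooling caveat noted in the comment above: a dense tensor stores unfilled voxels as 0, so a block containing only negative stored values max-pools to 0, while a pooling that only looks at the stored (sparse) voxels keeps the negative value. This is a minimal sketch assuming only numpy and skimage; it is not part of larcv3.

import numpy
from skimage.measure import block_reduce

# Dense 4x4 image with a single stored, negative voxel; all other pixels are 0.
dense = numpy.zeros((4, 4))
dense[0, 0] = -5.0

# Dense max pooling sees the implicit zeros, so this 2x2 block pools to 0.0 ...
dense_pooled = block_reduce(dense, block_size=(2, 2), func=numpy.max)
print(dense_pooled[0, 0])  # 0.0

# ... whereas pooling over only the stored voxels would keep -5.0 for that block.
# That mismatch is why the test above feeds only non-negative values to kPoolMax.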
Code Example #3
File: test_voxel.py  Project: DeepLearnPhysics/larcv3
def test_Voxel_h_VoxelSet():

    vs = larcv.VoxelSet()

    n_voxels = 10
    for i in range(n_voxels):
        vs.emplace(i, i, False)

    vec = vs.as_vector()

    assert (len(vec) == n_voxels)
    vs.clear_data()

    n_voxels = 11
    for j in range(n_voxels):
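        # Note: `i` still holds its final value (9) from the loop above,
        # so these voxel ids run from 9 upward.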
        vs.insert(larcv.Voxel(i + j, i + j))

    vs.clear_data()
    n_voxels = 12
    for j in range(n_voxels):
        vs.add(larcv.Voxel(i + j, i + j))

    assert (vs.size() == n_voxels)
Code Example #4
def store_lr_hits(io_manager, this_lr_hits):
    event_sparse3d = io_manager.get_data("sparse3d", "lr_hits")

    meta = get_NEW_LR_meta()

    st = larcv.SparseTensor3D()
    st.meta(meta)

    unique = numpy.unique(this_lr_hits['Z'])

    for row in this_lr_hits:
        index = meta.position_to_index([row['X'], row['Y'], row['Z']])

        if index >= meta.total_voxels():
            print(
                "Skipping voxel at original coordinates ({}, {}, {}) as it is out of bounds"
                .format(row['X'], row['Y'], row['Z']))
            continue
        st.emplace(larcv.Voxel(index, row['E']), False)

    event_sparse3d.set(st)
Code Example #5
def convert_files(io_manager, next_new_meta, file_groups, convert_mc_hits=False):


    # for fidc, fname in enumerate(file_groups):
    fidc, (fname, fgroup) = file_groups
    # fname = fname.split('/')[-1]

    fname = top_input_path + os.path.basename(fname)

    # try:
    evtfile = tb.open_file(fname, 'r')
    # print(evtfile)
    # except Exception:
    #     continue

    # Instead of slicing and dicing later, we read everything into memory up front:
    high_threshold_voxels = evtfile.root.CHITS.highTh.read()
    low_threshold_voxels = evtfile.root.CHITS.lowTh.read()

    if convert_mc_hits:
        mc_extents = evtfile.root.MC.extents.read()
        mc_hits_voxels = evtfile.root.MC.hits.read()
        
        mc_hit_first_index = 0

    run = 0
    for ievt,event in fgroup.iterrows():
        label = 0
        if event['positron'] == 1.0 and event['E_add'] == 0.0:
            label = 1

        previous_event = event['event']

        io_manager.set_id(run, fidc, int(event['event']))

        ################################################################################
        # Store the particle information:
        larcv_particle = io_manager.get_data("particle", "label")
        particle = larcv.Particle()
        particle.energy_init(0.)
        particle.pdg_code(label)
        larcv_particle.append(particle)
        ################################################################################
        # Store the voxel information:



        ################################################################################
        # Store the highTh info:
        event_sparse3d = io_manager.get_data("sparse3d", "voxels")
        st = larcv.SparseTensor3D()
        st.meta(next_new_meta[0])

        voxel_idcs = high_threshold_voxels['event'] == event['event']
        voxels = high_threshold_voxels[voxel_idcs]

        # Find all the NaNs:
        weights = voxels['Ec']
        is_nan_array = np.isnan(weights)

        # Extract the positions:
        position_array = voxels[['X','Y','Z']]


        # get the index array:
        index = [ next_new_meta[0].position_to_index(p) for p in position_array ]

        _ = [st.emplace(larcv.Voxel(index[i], weights[i]), True) for i in range(len(index)) if not is_nan_array[i]]

        event_sparse3d.set(st)
        ################################################################################



        ################################################################################
        # Store the lowTh info:
        event_sparse3d = io_manager.get_data("sparse3d", "voxels_low")
        st = larcv.SparseTensor3D()
        st.meta(next_new_meta[0])

        voxel_idcs = low_threshold_voxels['event'] == event['event']
        voxels = low_threshold_voxels[voxel_idcs]

        # Find all the NaNs:
        weights = voxels['Ec']
        is_nan_array = np.isnan(weights)

        # Extract the positions:
        position_array = voxels[['X','Y','Z']]


        # get the index array:
        index = [ next_new_meta[0].position_to_index(p) for p in position_array ]

        _ = [st.emplace(larcv.Voxel(index[i], weights[i]), True) for i in range(len(index)) if not is_nan_array[i]]

        event_sparse3d.set(st)
        ################################################################################


        if convert_mc_hits:
            ################################################################################
            # Store the mchit info:
            event_sparse3d = io_manager.get_data("sparse3d", "mchit")
            st = larcv.SparseTensor3D()
            st.meta(next_new_meta[1])


            if event['event'] >= len(mc_extents):
                break

            mc_hit_last_index = mc_extents[event['event']]['last_hit']

            mc_hits = mc_hits_voxels[mc_hit_first_index:mc_hit_last_index]
            mc_positions = mc_hits['hit_position']
            mc_energy    = mc_hits['hit_energy']

            mc_hit_first_index = mc_hit_last_index


            # Find all the NaNs:
            is_nan_array = np.isnan(mc_energy)

            # get the index array:
            index = [ next_new_meta[1].position_to_index(p) for p in mc_positions ]

            _ = [st.emplace(larcv.Voxel(index[i], mc_energy[i]), True) for i in range(len(index)) if not is_nan_array[i]]

            event_sparse3d.set(st)
            ################################################################################


        io_manager.save_entry()

    evtfile.close()

    return
Code Example #6
def convert_file(io_manager, next_new_meta, fname, run, subrun, start_entry,
                 end_entry):

    evtfile = tb.open_file(str(fname), 'r')

    events = evtfile.root.Run.events.read()

    tracks = evtfile.root.Tracking.Tracks.read()
    summary = evtfile.root.Summary.Events.read()

    event_numbers = events['evt_number']
    event_energy = summary['evt_energy']

    convert_low_th = True

    # Instead of slicing and dicing later, we read everything into memory up front:
    high_threshold_voxels = evtfile.root.CHITS.highTh.read()

    if convert_low_th:
        low_threshold_voxels = evtfile.root.CHITS.lowTh.read()

    n_events = len(event_numbers)

    # Only loop over the needed entries:
    for ievt in range(start_entry, end_entry):
        if ievt >= len(event_energy): continue

        event = event_numbers[ievt]
        energy = event_energy[ievt]

        if energy < 1.0 or energy > 2.0:
            continue

        if ievt % 10 == 0:
            print(
                f"Beginning entry {ievt} of {n_events} which is event {event}")

        io_manager.set_id(int(run), int(subrun), event)

        ################################################################################
        # Store the particle information:
        larcv_particle = io_manager.get_data("particle", "label")
        particle = larcv.Particle()
        particle.energy_init(energy)
        larcv_particle.append(particle)
        ################################################################################

        ################################################################################
        # Store the highTh info:
        event_sparse3d = io_manager.get_data("sparse3d", "voxels")
        st = larcv.SparseTensor3D()
        st.meta(next_new_meta)

        voxel_idcs = high_threshold_voxels['event'] == event
        voxels = high_threshold_voxels[voxel_idcs]

        # Find all the NaNs:
        weights = voxels['Ec']
        is_nan_array = np.isnan(weights)

        # Extract the positions:
        position_array = voxels[['X', 'Y', 'Z']]

        # get the index array:
        index = [next_new_meta.position_to_index(p) for p in position_array]

        max_index = next_new_meta.total_voxels()
        _ = [
            st.emplace(larcv.Voxel(index[i], weights[i]), True)
            for i in range(len(index))
            if (not is_nan_array[i] and index[i] < max_index)
        ]

        event_sparse3d.set(st)
        ################################################################################

        if convert_low_th:
            ################################################################################
            # Store the lowTh info:
            event_sparse3d = io_manager.get_data("sparse3d", "voxels_low")
            st = larcv.SparseTensor3D()
            st.meta(next_new_meta)

            voxel_idcs = low_threshold_voxels['event'] == event
            voxels = low_threshold_voxels[voxel_idcs]

            # Find all the NaNs:
            weights = voxels['Ec']
            is_nan_array = np.isnan(weights)

            # Extract the positions:
            position_array = voxels[['X', 'Y', 'Z']]

            # get the index array:
            index = [
                next_new_meta.position_to_index(p) for p in position_array
            ]

            _ = [
                st.emplace(larcv.Voxel(index[i], weights[i]), True)
                for i in range(len(index)) if not is_nan_array[i]
            ]

            event_sparse3d.set(st)
            ################################################################################

        io_manager.save_entry()

    evtfile.close()

    return
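
The list comprehensions in Code Examples #5 and #6 repeat the same filtering step: drop hits whose energy weight is NaN and, in Code Example #6, hits whose index falls outside the meta. Below is a minimal sketch of that filtering factored into a standalone helper, assuming only numpy; the helper name filter_voxels and its arguments are illustrative and not part of larcv3.

import numpy as np

def filter_voxels(indices, weights, max_index):
    # Keep only entries with a finite (non-NaN) weight and an in-range index.
    indices = np.asarray(indices)
    weights = np.asarray(weights)
    keep = ~np.isnan(weights) & (indices < max_index)
    return indices[keep], weights[keep]

# Hypothetical usage: the surviving (index, weight) pairs would then be emplaced,
# e.g. st.emplace(larcv.Voxel(int(idx), float(w)), True) for each kept pair.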
Code Example #7
def store_mc_info(io_manager, this_hits, this_particles):
    event_cluster3d = io_manager.get_data("cluster3d", "mc_hits")

    cluster_indexes = numpy.unique(this_hits['particle_indx'])

    meta = get_NEW_LR_meta()

    sc = larcv.SparseCluster3D()
    sc.meta(meta)
    # sc.resize(len(cluster_indexes))

    cluster_lookup = {}
    for i, c in enumerate(cluster_indexes):
        cluster_lookup[c] = i

    vs = [larcv.VoxelSet() for i in cluster_indexes]

    # Add all the hits to the right cluster:
    for hit in this_hits:
        # Get the index from the meta
        index = meta.position_to_index(hit['hit_position'])
        # Create a voxel on the fly with the energy
        vs[cluster_lookup[hit['particle_indx']]].add(
            larcv.Voxel(index, hit['hit_energy']))

    # Add the voxel sets into the cluster set
    for i, v in enumerate(vs):
        v.id(i)  # Set id
        sc.insert(v)

    # Store the mc_hits as a cluster 3D
    event_cluster3d.set(sc)

    particle_set = io_manager.get_data("particle", "all_particles")

    positron = False

    # Now, store the particles:
    for i, particle in enumerate(this_particles):

        if particle['particle_name'] == b'e+' and particle[
                'initial_volume'] == b'ACTIVE':
            positron = True

        if b'Pb208' in particle['particle_name']:
            pdg_code = 30000000
        else:
            pdg_code = pdg_lookup[particle['particle_name']]

        p = larcv.Particle()
        p.id(i)  # id
        p.track_id(particle['particle_indx'])
        p.nu_current_type(
            particle['primary'])  # Storing primary info in nu_current_type
        p.pdg_code(pdg_code)
        p.parent_track_id(particle['mother_indx'])
        p.position(*particle['initial_vertex'])
        p.end_position(*particle['final_vertex'])
        p.creation_process(particle['creator_proc'])
        p.energy_init(particle['kin_energy'])
        p.momentum(*particle['momentum'])

        particle_set.append(p)

    return positron
Code Example #8
def test_import_Voxel_h_Voxel():

    v = larcv.Voxel()