Example #1
import h5py

import read_hdf_file


def compare_weight_coordinates(first_hdf_file_name, second_hdf_file_name):

    # Both files are only read here, so open them read-only.
    first_hdf_file = h5py.File(first_hdf_file_name, 'r')
    second_hdf_file = h5py.File(second_hdf_file_name, 'r')

    particles_name_first = read_hdf_file.get_particles_name(first_hdf_file)
    particles_groups_first = read_hdf_file.ParticlesGroups(
        particles_name_first)
    first_hdf_file.visititems(particles_groups_first)

    particles_name_second = read_hdf_file.get_particles_name(second_hdf_file)
    particles_groups_second = read_hdf_file.ParticlesGroups(
        particles_name_second)
    second_hdf_file.visititems(particles_groups_second)

    # Use the smaller group count so mismatched files cannot raise IndexError.
    size_groups = min(len(particles_groups_first.particles_groups),
                      len(particles_groups_second.particles_groups))

    for i in range(size_groups):
        # get_dataset_values returns the group's weights together with the
        # requested dataset's values.
        weight_first, positions_first = get_dataset_values(
            particles_groups_first.particles_groups[i], 'position')
        weight_first, momentum_first = get_dataset_values(
            particles_groups_first.particles_groups[i], 'momentum')

        weight_second, positions_second = get_dataset_values(
            particles_groups_second.particles_groups[i], 'position')
        weight_second, momentum_second = get_dataset_values(
            particles_groups_second.particles_groups[i], 'momentum')

        count_weight_difference(weight_first, positions_first, weight_second,
                                positions_second)
        count_weight_difference(weight_first, momentum_first, weight_second,
                                momentum_second)
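
The two helpers used above, `get_dataset_values` and `count_weight_difference`, are project code that this listing does not show. A minimal sketch of what they might do, assuming an openPMD-style layout in which each particle group holds a `weighting` dataset and per-axis `x`/`y`/`z` components under `position` and `momentum` (the dataset names are assumptions, not taken from the project):

import numpy as np


def get_dataset_values(group, record_name):
    # Sketch: return the group's weights together with an (N, 3) array of the
    # requested record's per-axis values.
    weights = np.asarray(group['weighting'])
    values = np.stack([np.asarray(group[record_name][axis])
                       for axis in ('x', 'y', 'z')], axis=1)
    return weights, values


def count_weight_difference(weights_first, values_first,
                            weights_second, values_second):
    # Sketch: compare the weighted per-axis totals of the two files.
    total_first = (values_first * weights_first[:, None]).sum(axis=0)
    total_second = (values_second * weights_second[:, None]).sum(axis=0)
    print('weighted difference per axis:', total_first - total_second)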
Example #2
import h5py

import read_hdf_file


def get_particles_groups(hdf_file, hdf_file_reduction_name):

    # Append mode keeps the reduction file writable for later processing.
    hdf_file_reduction = h5py.File(hdf_file_reduction_name, 'a')
    particles_name = read_hdf_file.get_particles_name(hdf_file_reduction)
    particles_collect = read_hdf_file.ParticlesGroups(particles_name)
    hdf_file.visititems(particles_collect)

    return particles_collect, hdf_file_reduction
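

# A possible call site for get_particles_groups (a sketch; the file names are
# placeholders, not taken from the project):
def example_usage():
    hdf_file = h5py.File('data.h5', 'r')
    particles_collect, hdf_file_reduction = get_particles_groups(
        hdf_file, 'data_thinning.h5')
    for group in particles_collect.particles_groups:
        print('name group ' + str(group.name))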


def base_reading_function(hdf_file_name):
    # The file is only read here, so open it read-only.
    hdf_file = h5py.File(hdf_file_name, 'r')

    particles_name = read_hdf_file.get_particles_name(hdf_file)
    particles_collect = read_hdf_file.ParticlesGroups(particles_name)
    print(particles_collect)
    hdf_file.visititems(particles_collect)

    for group in particles_collect.particles_groups:
        print('name group ' + str(group.name))

        get_particles_sizes(hdf_file, group)
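
`get_particles_sizes` is likewise defined elsewhere in the project. A rough guess at the kind of per-group report it produces, using only standard h5py traversal (not the project's code):

def get_particles_sizes(hdf_file, group):
    # Sketch: print the shape and dtype of every dataset under the group.
    # hdf_file is accepted only to mirror the call in base_reading_function.
    def report(name, node):
        if isinstance(node, h5py.Dataset):
            print('  %s: shape=%s, dtype=%s' % (name, node.shape, node.dtype))

    group.visititems(report)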
Example #4
from shutil import copyfile

import h5py
import numpy as np

import read_hdf_file


def base_corparation(first_hdf_file_name, second_hdf_file_name, csv_file_name,
                     file_indetifacator):

    # Both input files are only read here, so open them read-only.
    first_hdf_file = h5py.File(first_hdf_file_name, 'r')
    second_hdf_file = h5py.File(second_hdf_file_name, 'r')

    particles_name_first = read_hdf_file.get_particles_name(first_hdf_file)

    particles_groups_first = read_hdf_file.ParticlesGroups(
        particles_name_first)
    first_hdf_file.visititems(particles_groups_first)

    particles_name_second = read_hdf_file.get_particles_name(second_hdf_file)

    particles_groups_second = read_hdf_file.ParticlesGroups(
        particles_name_second)
    second_hdf_file.visititems(particles_groups_second)

    # Strip the directory path and the '.h5' extension from the file name.
    idx = first_hdf_file_name.rfind('/')
    name_of_file = first_hdf_file_name[idx + 1:-3]

    for i in range(min(len(particles_groups_first.particles_groups),
                       len(particles_groups_second.particles_groups))):

        substr = particles_groups_first.particles_groups[i].name
        print('name group ' + str(substr))

        name_of_group = substr[substr.rfind('/') + 1:]

        absolute_coordinates_first, dimensions_first, weights_first =\
            read_group_values(particles_groups_first, i)

        if len(absolute_coordinates_first) == 0:
            # Nothing to compare in this group; move on to the next one.
            continue

        absolute_coordinates_second, dimensions_second, weights_second =\
            read_group_values(particles_groups_second, i)

        kernel_coords_first_x, kernel_coords_first_y, kernel_coords_first_z = \
            compute_1d_kernels(0, dimensions_first.dimension_position,
                               absolute_coordinates_first, weights_first)

        kernel_coords_second_x, kernel_coords_second_y, kernel_coords_second_z = \
            compute_1d_kernels(0, dimensions_second.dimension_position,
                               absolute_coordinates_second, weights_second)

        iteration_name = name_of_group + "coords"

        row_coordinates = \
            compute_1_d_stats_metrics(kernel_coords_first_x, kernel_coords_first_y, kernel_coords_first_z,
                                      kernel_coords_second_x, kernel_coords_second_y, kernel_coords_second_z, name_of_file, iteration_name)

        write_values_into_csv_file(row_coordinates, csv_file_name)

        # Repeat the comparison for the momentum components, which follow the
        # position components in the coordinate arrays.
        momentum_idx = dimensions_first.dimension_position \
            + dimensions_first.dimension_momentum
        kernel_coords_first_x, kernel_coords_first_y, kernel_coords_first_z = \
            compute_1d_kernels(dimensions_first.dimension_position, momentum_idx,
                               absolute_coordinates_first, weights_first)

        # The second file may carry its own dimensionality, so compute its
        # momentum slice from dimensions_second.
        momentum_idx_second = dimensions_second.dimension_position \
            + dimensions_second.dimension_momentum
        kernel_coords_second_x, kernel_coords_second_y, kernel_coords_second_z = \
            compute_1d_kernels(dimensions_second.dimension_position,
                               momentum_idx_second,
                               absolute_coordinates_second, weights_second)

        iteration_name = name_of_group + "momentum"

        row_momentum = \
            compute_1_d_stats_metrics(kernel_coords_first_x, kernel_coords_first_y, kernel_coords_first_z,
                                      kernel_coords_second_x, kernel_coords_second_y, kernel_coords_second_z,
                                      name_of_file, iteration_name)

        write_values_into_csv_file(row_momentum, csv_file_name)
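
# The helpers used above (compute_1d_kernels, compute_1_d_stats_metrics,
# write_values_into_csv_file) are project code that this listing does not
# show. Two rough stand-ins follow; both are guesses, not the project's
# implementations.

# compute_1d_kernels appears to build one weighted 1-D kernel density
# estimate per axis in the column slice [start, end) of the coordinate array
# (an assumption based on how its results are compared above):
from scipy.stats import gaussian_kde


def compute_1d_kernels(start, end, coordinates, weights):
    columns = np.asarray(coordinates)[:, start:end]
    # One weighted KDE per axis; the callers unpack exactly three of them.
    return [gaussian_kde(columns[:, axis], weights=weights)
            for axis in range(columns.shape[1])]


# write_values_into_csv_file appends one row of metric values to the CSV
# file; with the standard library that is roughly:
import csv


def write_values_into_csv_file(row, csv_file_name):
    with open(csv_file_name, 'a', newline='') as csv_file:
        csv.writer(csv_file).writerow(row)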


if __name__ == "__main__":
    hdf_file_name = '/home/kseniia/Documents/k_means_measure/data00000355.h5'
    hdf_file_reduction_name = '/home/kseniia/Documents/k_means_measure/data00000355_thinning.h5'
    # Work on a copy so the original data file stays untouched.
    copyfile(hdf_file_name, hdf_file_reduction_name)
    hdf_file = h5py.File(hdf_file_name, 'r')
    hdf_file_reduction = h5py.File(hdf_file_reduction_name, 'a')
    particles_name = read_hdf_file.get_particles_name(hdf_file_reduction)
    particles_collect = read_hdf_file.ParticlesGroups(particles_name)
    hdf_file.visititems(particles_collect)
    tolerance_parsent = 1.9e-23

    dimension = 3
    type_particles = 'mass'

    parameters = Vranic_merging_algorithm_parameters(tolerance_parsent,
                                                     dimension, type_particles)
    for group in particles_collect.particles_groups:
        data, weights, dimensions\
            = read_hdf_file.read_points_group(group)
        algorithm = Vranic_merging_algorithm(parameters)
        data = np.array(data)
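
`Vranic_merging_algorithm_parameters` and `Vranic_merging_algorithm` come from the project's merging module and are not shown here. A minimal stand-in for the parameter container that would satisfy the constructor call above (the field names are an assumption):

class Vranic_merging_algorithm_parameters:
    # Hypothetical stand-in: bundles the settings the merging algorithm reads.
    def __init__(self, tolerance_momentum, dimension, type_particles):
        self.tolerance_momentum = tolerance_momentum
        self.dimension = dimension
        self.type_particles = type_particles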