Example #1
0
    def test_sk_partial(self):
        """Check partial structure factors against stored reference values.

        Computes S(k) per species twice — once by filtering the trajectory
        to a single species, once through the Partial wrapper — and checks
        both routes against the same reference data.
        """
        # TODO: this test fails with python 3 (small deviations)
        path = os.path.join(self.reference_path, 'kalj-small.xyz')
        expected = {
            'A': numpy.array([0.078218, 2.896436, 0.543363]),
            'B': numpy.array([0.867164, 0.869868, 0.981121]),
            'AB': numpy.array([-0.1907, 0.399360, 0.050480])
        }
        kgrid = [4, 7.3, 10]

        # Route 1: restrict the trajectory to one species, then compute S(k).
        for spec in ['A', 'B']:
            with trajectory.TrajectoryXYZ(path) as th:
                th.add_callback(filter_species, spec)
                sk = postprocessing.StructureFactor(th, kgrid)
                sk.compute()
                self.assertLess(deviation(sk.value, expected[spec]), 1e-2)

        # Route 2: let Partial do the species decomposition, including AB.
        with trajectory.TrajectoryXYZ(path) as th:
            partial_sk = postprocessing.Partial(postprocessing.StructureFactor,
                                                ['A', 'B'], th, kgrid)
            partial_sk.compute()
            self.assertLess(
                deviation(partial_sk.partial[('A', 'A')].value, expected['A']),
                1e-2)
            self.assertLess(
                deviation(partial_sk.partial[('B', 'B')].value, expected['B']),
                1e-2)
            self.assertLess(
                deviation(partial_sk.partial[('A', 'B')].value, expected['AB']),
                1e-2)
Example #2
0
def calculate_structure_factor(traj_file,
                               out_file=None,
                               mode="separate_species",
                               **kwargs):
    """
    Compute the static structure factor S(k) of a trajectory.

    Assume trajectory has format .xyz.gz
    Writes the first peak of the structure factor for each species in out_file_kmax.

    Parameters
    ----------
    traj_file : str
        Path to the trajectory file.
    out_file : str or None
        When given, a text table with a k column and one S(k) column per
        species pair (or a single S column for "mix_species") is written.
    mode : str
        "separate_species": one partial S(k) per ordered species pair.
        "mix_species": a single S(k) computed over all particles.
    **kwargs
        Forwarded to the underlying postprocessing correlator.

    Raises
    ------
    ValueError
        If `mode` is not one of the recognized values.
    """
    if mode not in ("separate_species", "mix_species"):
        # Fail early: an unknown mode previously slipped through and only
        # surfaced later as a NameError when writing the output file.
        raise ValueError("Unrecognized mode %r" % mode)

    with atooms.trajectory.Trajectory(traj_file) as traj:
        if mode == "separate_species":
            species = traj[0].distinct_species()
            analysis = pp.Partial(pp.StructureFactor,
                                  trajectory=traj,
                                  species=species,
                                  **kwargs)
            analysis.do()
            analysis = analysis.partial  # Dict with results for each species

            ks = analysis[(
                species[0],
                species[0])].grid  # Grid is the same for every combination.
            # One S(k) per ordered species pair, in nested (spec1, spec2) order.
            sks = [analysis[(spec1, spec2)].value
                   for spec1 in species for spec2 in species]
        elif mode == "mix_species":
            analysis = pp.StructureFactorLegacy(traj, **kwargs)
            analysis.do()

    if out_file:
        if mode == "separate_species":
            columns = (ks, )
            fmt = "%.5g"
            header = "columns=k,"
            # `sks` was filled in the same nested order, so zip keeps each
            # column aligned with its species pair label.
            pairs = ((s1, s2) for s1 in species for s2 in species)
            for sk_values, (spec1, spec2) in zip(sks, pairs):
                columns += (sk_values, )
                fmt += " %.8g"
                header += "Sk_%s-%s," % (spec1, spec2)
            header = header[:-1]  # remove final comma.
        elif mode == "mix_species":
            columns = (analysis.grid, analysis.value)
            fmt = "%.5g %.5g"
            header = "columns=k,S"

        columns = np.column_stack(columns)
        np.savetxt(out_file, columns, fmt=fmt, header=header)
Example #3
0
 def test_sk_field_partial(self):
     """
     Test that weight works with partial correlation.

     The weight field zeroes out species A, so S(k) for ('A', 'A') must
     vanish while ('B', 'B') matches the unweighted reference.
     """
     # TODO: this test fails with python 3 because of a weird issue with xyz trajectory in atooms (_fallback)
     f = os.path.join(self.reference_path, 'kalj-small.xyz')
     ff = os.path.join(self.reference_path, 'kalj-small-field.xyz')
     # Context managers guarantee both trajectories are closed even when an
     # assertion fails (the original leaked `th` on failure and never closed
     # the weight trajectory at all).
     with trajectory.TrajectoryXYZ(f) as th:
         p = postprocessing.Partial(postprocessing.StructureFactor, ['A', 'B'],
                                    th, [4, 7.3, 10])
         with trajectory.TrajectoryXYZ(ff) as field_th:
             p.add_weight(trajectory=field_th, field='field_B')
             p.compute()
         ref_value = numpy.array(
             [0.86716496871363735, 0.86986885176760842, 0.98112175463699136])
         zeros = numpy.zeros(3)
         self.assertLess(deviation(p.partial[('B', 'B')].value, ref_value),
                         2e-2)
         self.assertLess(deviation(p.partial[('A', 'A')].value, zeros), 2e-2)
Example #4
0
def calculate_radial_distribution_function(traj_file, out_file=None):
    """
    Compute partial radial distribution functions g(r) for every species pair.

    Parameters
    ----------
    traj_file : str
        Path to the trajectory file.
    out_file : str or None
        When given, write a text table with an r column and one g(r)
        column per ordered species pair.
    """
    with atooms.trajectory.Trajectory(traj_file) as traj:
        print("Averaging over %d snapshots" % len(traj))
        species = traj[0].distinct_species()

        analysis = pp.Partial(pp.RadialDistributionFunction,
                              trajectory=traj,
                              species=species,
                              dr=0.02)
        analysis.do()
        analysis = analysis.partial  # Dict with results for each species

        rs = analysis[(
            species[0],
            species[0])].grid  # Grid is the same for every combination.
        # One g(r) per ordered species pair, in nested (spec1, spec2) order.
        grs = [analysis[(spec1, spec2)].value
               for spec1 in species for spec2 in species]

    if out_file:
        columns = (rs, )
        fmt = "%.5g"
        header = "columns=r,"
        # `grs` was filled in the same nested order, so zip keeps each
        # column aligned with its species pair label.
        pairs = ((s1, s2) for s1 in species for s2 in species)
        for gr_values, (spec1, spec2) in zip(grs, pairs):
            columns += (gr_values, )
            fmt += " %.8g"
            header += "gr_%s-%s," % (spec1, spec2)
        header = header[:-1]  # remove final comma.

        columns = np.column_stack(columns)
        np.savetxt(out_file, columns, fmt=fmt, header=header)
Example #5
0
def calculate_overlap(traj_file,
                      a,
                      out_file=None,
                      out_file_quantities=None,
                      collective=False,
                      **kwargs):
    r"""
    Compute the (self or collective) overlap correlation function per species.

    a is the coarse-graining length scale to determine when two density profiles are uncorrelated.
    Note: I use the so-called self-overlap, which is
    Q_s(t, t') = (1/N) \sum_i \theta( |r_i(t') - r_i(t) | - a ),
    which has very similar behavior to the self-intermediate scattering function.

    There also exists something called the collective overlap, which is
    Q_c(t, t') = (1/N) \sum_i \sum_j \theta( |r_i(t') - r_j(t)| - a ).

    See "Non-linear dynamic response of glass-forming liquids to random pinning" by Kob and Coslovich.

    Parameters
    ----------
    traj_file : str
        Path to the trajectory file.
    a : float
        Coarse-graining length scale.
    out_file : str or None
        When given, write Q(t) per species as a text table.
    out_file_quantities : str or None
        When given, write the relaxation time tau per species.
    collective : bool
        Use the collective overlap instead of the self overlap.
    **kwargs
        Forwarded to the underlying correlator.
    """

    with atooms.trajectory.Trajectory(traj_file) as traj:
        nframes = len(traj.steps)
        species = traj[0].distinct_species()

        # Repair the step sequence first: the time grid below assumes a
        # clean (log or linear) spacing.
        result = detect_and_fix_spacing(traj.steps)
        traj.steps = result["corrected_steps"].tolist()
        mode = result["mode"]
        if mode == "log":
            base = result["base"]
            max_exp = result["max_exp"]
            print(
                "Detected a logarithmically spaced trajectory of %d frames with block_size %d ** %d. Num frames in block: %d"
                % (nframes, base, max_exp, traj.block_size))
        elif mode == "linear":
            spacing = result["spacing"]
            print(
                "Detected a linearly spaced trajectory of %d frames with spacing %d."
                % (nframes, spacing))

        tgrid = default_tgrid(traj.steps)

        func = pp.CollectiveOverlap if collective else pp.SelfOverlap
        analysis = pp.Partial(func,
                              trajectory=traj,
                              species=species,
                              tgrid=tgrid,
                              a=a,
                              **kwargs)
        analysis.do()
        analysis = analysis.partial  # Dict with results for each species

        # One Q(t) array per species, in `species` order.
        Qs = [np.array(analysis[spec].value) for spec in species]

    tgrid = np.array(tgrid, dtype=int)
    # Relaxation time per species, extracted from the decay of Q(t).
    taus = np.zeros(len(species))
    for i, qt in enumerate(Qs):
        taus[i] = extract_tau(qt, tgrid)

    print("tau:", taus)

    if out_file:
        columns = (tgrid, )
        fmt = "%d"
        header = "columns=step,"
        for i, spec in enumerate(species):
            columns += (Qs[i], )
            fmt += " %.8g"
            header += "Q(t, a=%.3f)_species%s," % (a, spec)
        header = header[:-1]  # remove final comma.

        columns = np.column_stack(columns)
        np.savetxt(out_file, columns, fmt=fmt, header=header)

    if out_file_quantities:
        header = "# columns="
        data = ""
        for spec_i, spec in enumerate(species):
            header += "tau_%s,a_%s," % (spec, spec)
            data += "%.6g %.3g " % (taus[spec_i], a)
        header += "\n"
        data += "\n"

        with open(out_file_quantities, "w") as f:
            f.write(header)
            f.write(data)
Example #6
0
def calculate_self_intermediate_scattering_function(traj_file,
                                                    k_values,
                                                    out_file=None,
                                                    out_file_quantities=None,
                                                    **kwargs):
    """
    Compute the self-intermediate scattering function F_s(k, t) per species.

    k_values contains a q value for each species; this would normally be the maximum of the first peak in the static structure factor.

    Parameters
    ----------
    traj_file : str
        Path to the trajectory file.
    k_values : sequence of float
        One wave number per species, in `distinct_species()` order.
    out_file : str or None
        When given, write F_s(t) per species as a text table.
    out_file_quantities : str or None
        When given, write the relaxation time tau and k value per species.
    **kwargs
        Forwarded to the underlying correlator.
    """

    # This is necessary because atooms-pp has bugs when k_values is not sorted in ascending order.
    # But we need to remember which k-value belongs to which species.
    k_values = np.array(k_values)
    unsorted_k_values = np.copy(k_values)
    sort_idx = np.argsort(k_values)
    k_values = k_values[sort_idx]

    with atooms.trajectory.Trajectory(traj_file) as traj:
        nframes = len(traj.steps)
        species = traj[0].distinct_species()
        print("Using q_values", unsorted_k_values, "for species", species)

        # Repair the step sequence first: the time grid below assumes a
        # clean (log or linear) spacing.
        result = detect_and_fix_spacing(traj.steps)
        traj.steps = result["corrected_steps"].tolist()
        mode = result["mode"]
        if mode == "log":
            base = result["base"]
            max_exp = result["max_exp"]
            print(
                "Detected a logarithmically spaced trajectory of %d frames with block_size %d ** %d. Num frames in block: %d"
                % (nframes, base, max_exp, traj.block_size))
        elif mode == "linear":
            spacing = result["spacing"]
            print(
                "Detected a linearly spaced trajectory of %d frames with spacing %d."
                % (nframes, spacing))

        tgrid = default_tgrid(traj.steps)
        # I don't know how to tell the library to compute a different q value for each species.
        # I just compute both q values for each species. Not very efficient.
        analysis = pp.Partial(pp.SelfIntermediateScattering,
                              trajectory=traj,
                              species=species,
                              tgrid=tgrid,
                              kgrid=k_values,
                              **kwargs)
        analysis.do()
        analysis = analysis.partial  # Dict with results for each species

        fks = []
        actual_k_values = []
        for i, spec in enumerate(species):
            # This happens if the q values are actually the same for all species.
            if len(analysis[species[0]].kgrid) == 1:
                fks.append(np.array(analysis[spec].value[0]))
                actual_k_values.append(analysis[spec].kgrid[0])
            else:
                # Undo the ascending sort so each species gets its own k.
                fks.append(np.array(analysis[spec].value[sort_idx[i]]))
                actual_k_values.append(analysis[spec].kgrid[sort_idx[i]])

    print("Actual kgrid:", actual_k_values)
    # For log-spaced trajectories, it automatically adds t = 0 for some reason.
    actual_tgrid = np.array(analysis[species[0]].grid[1], dtype=int)
    # Relaxation time per species, extracted from the decay of F_s(t).
    taus = np.zeros(len(species))
    for i, fk in enumerate(fks):
        taus[i] = extract_tau(fk, actual_tgrid)
    print("tau:", taus)

    if out_file:
        columns = (actual_tgrid, )
        fmt = "%d"
        header = "columns=step,"
        for i, spec in enumerate(species):
            columns += (fks[i], )
            fmt += " %.8g"
            header += "F_s(t, k=%.2f)_species%s," % (actual_k_values[i], spec)
        header = header[:-1]  # remove final comma.

        columns = np.column_stack(columns)
        np.savetxt(out_file, columns, fmt=fmt, header=header)

    if out_file_quantities:
        header = "# columns="
        data = ""
        for spec_i, spec in enumerate(species):
            header += "tau_%s,q_%s," % (spec, spec)
            data += "%.6g %.3g " % (taus[spec_i], actual_k_values[spec_i])
        header += "\n"
        data += "\n"

        with open(out_file_quantities, "w") as f:
            f.write(header)
            f.write(data)
Example #7
0
def calculate_msd(traj_file,
                  num_partitions=1,
                  out_file=None,
                  out_file_quantities=None):
    """
    Compute the mean-square displacement per species, optionally split
    into trajectory partitions.

    Parameters
    ----------
    traj_file : str
        Path to the trajectory file.
    num_partitions : int
        Number of consecutive trajectory slices to analyze independently
        (forced to 1 when the spacing is neither log nor linear).
    out_file : str or None
        When given, write the MSD curves as a text table.
    out_file_quantities : str or None
        When given, write the diffusion coefficient D and diffusive time
        tau_D per partition and species.

    Returns
    -------
    (ts, msds)
        Time grids and MSD values, one entry per partition.
    """
    ts = []
    msds = []
    # One list of per-species dicts (diffusive time, diffusion coefficient)
    # per partition.
    derived_quantities = []

    with atooms.trajectory.Trajectory(traj_file) as traj:
        nframes = len(traj)
        print("number of frames to analyze: %d" % nframes)
        species = traj[0].distinct_species()
        print("Detected %d different species, " % len(species), species)
        result = detect_and_fix_spacing(traj.steps)
        traj.steps = result["corrected_steps"].tolist()
        mode = result["mode"]
        if mode == "log":
            base = result["base"]
            max_exp = result["max_exp"]
            # A partition must hold whole log blocks, so size it in blocks.
            nblocks = len(traj.steps) // traj.block_size
            frames_per_partition = (nblocks // 2) * traj.block_size
            print(
                "detected a logarithmically spaced trajectory with block_size %d ** %d. Num frames in block: %d"
                % (base, max_exp, traj.block_size))
        elif mode == "linear":
            spacing = result["spacing"]
            frames_per_partition = nframes // num_partitions
            print("Detected a linearly spaced trajectory with spacing %d" %
                  (spacing))
        else:
            print(
                "Cannot only partition a trajectory that is either log or linearly spaced. Setting num_partitions = 1."
            )
            num_partitions = 1

        for n in range(num_partitions):
            print("starting with partition %d/%d" % (n + 1, num_partitions))
            subtraj = Sliced(
                traj,
                slice(n * frames_per_partition,
                      (n + 1) * frames_per_partition))
            tgrid = default_tgrid(subtraj.steps)

            analysis = pp.Partial(pp.MeanSquareDisplacement,
                                  trajectory=subtraj,
                                  tgrid=tgrid,
                                  species=species)
            analysis.do()
            analysis = analysis.partial  # Dict with results for each species

            ts.append(analysis[species[0]].grid)
            this_msd = []
            this_derived_quantities = []
            for spec in species:
                this_msd.append(analysis[spec].value)
                this_derived_quantities.append(analysis[spec].analysis)
            msds.append(this_msd)
            derived_quantities.append(this_derived_quantities)

    # Extract D and tau_D for every partition/species unconditionally.
    # (Previously this was done inside `if out_file:`, so requesting only
    # out_file_quantities crashed with a NameError.)
    tau_D = np.zeros((num_partitions, len(species)))
    D = np.zeros((num_partitions, len(species)))
    for n in range(num_partitions):
        for spec_i in range(len(species)):
            tau_D[n, spec_i] = derived_quantities[n][spec_i].get(
                'diffusive time tau_D', 0.0)
            D[n, spec_i] = derived_quantities[n][spec_i].get(
                'diffusion coefficient D', 0.0)

    if out_file:
        columns = (ts[0], )
        fmt = "%d"
        header = "columns=step,"
        for n in range(num_partitions):
            for spec_i, spec in enumerate(species):
                columns += (msds[n][spec_i], )
                fmt += " %.8g"
                header += "msd_partition%d_species%s," % (n + 1, spec)
        header = header[:-1]  # remove final comma.

        # Partitions may produce signals of slightly different lengths;
        # truncate every column to the shortest so column_stack succeeds.
        min_signal_len = min(len(m) for partition in msds for m in partition)
        columns = tuple(col[:min_signal_len] for col in columns)

        columns = np.column_stack(columns)
        np.savetxt(out_file, columns, fmt=fmt, header=header)

    if out_file_quantities:
        header = "columns="
        fmt = ""
        columns = ()
        for spec_i, spec in enumerate(species):
            header += "D_%s,tau_D_%s," % (spec, spec)
            fmt += "%.6g %.6g "
            columns += (D[:, spec_i], tau_D[:, spec_i])

        header = header[:-1]  # remove final comma.
        columns = np.column_stack(columns)
        np.savetxt(out_file_quantities,
                   columns,
                   fmt=fmt.strip(),
                   header=header)

    return ts, msds
Example #8
0
def calculate_dynamic_susceptiblility(traj_file,
                                      out_file=None,
                                      corr="self_overlap",
                                      tgrid=None,
                                      **kwargs):
    """
    Compute the dynamic susceptibility chi4(t) per species.

    Parameters
    ----------
    traj_file : str
        Path to the trajectory file.
    out_file : str or None
        When given, write chi4(t) per species as a text table.
    corr : str
        Base correlator: "self_overlap" or "self_intermediate_scattering".
    tgrid : sequence or None
        Time grid; derived from the trajectory steps when None.
    **kwargs
        Forwarded to the underlying correlator (e.g. `a` for the overlap).

    Raises
    ------
    ValueError
        If `corr` is not a recognized correlator name.
    """
    if corr not in ("self_overlap", "self_intermediate_scattering"):
        # Fail early: an unknown correlator previously only printed a
        # message and then crashed with an IndexError when writing output.
        raise ValueError("Did not recognize correlator %s" % corr)

    with atooms.trajectory.Trajectory(traj_file) as traj:
        nframes = len(traj.steps)
        species = traj[0].distinct_species()

        # Repair the step sequence first: the time grid below assumes a
        # clean (log or linear) spacing.
        result = detect_and_fix_spacing(traj.steps)
        traj.steps = result["corrected_steps"].tolist()
        mode = result["mode"]
        if mode == "log":
            base = result["base"]
            max_exp = result["max_exp"]
            print(
                "Detected a logarithmically spaced trajectory of %d frames with block_size %d ** %d. Num frames in block: %d"
                % (nframes, base, max_exp, traj.block_size))
        elif mode == "linear":
            spacing = result["spacing"]
            print(
                "Detected a linearly spaced trajectory of %d frames with spacing %d."
                % (nframes, spacing))

        if tgrid is None:
            tgrid = default_tgrid(traj.steps)

        chi4s = []

        if corr == "self_overlap":
            func_args = (traj, )
            func_kwargs = dict(tgrid=tgrid, **kwargs)
            analysis = pp.Partial(pp.Chi4SelfOverlap, species, func_args,
                                  func_kwargs)
            analysis.do()
            analysis = analysis.partial  # Dict with results for each species
            tgrid = analysis[species[0]].grid
            for spec in species:
                chi4s.append(np.array(analysis[spec].value))
        else:  # corr == "self_intermediate_scattering"
            func_kwargs = dict(tgrid=tgrid, **kwargs)
            func_args = (pp.SelfIntermediateScattering, traj)
            analysis = pp.Partial(pp.Susceptibility, species, func_args,
                                  func_kwargs)
            analysis.do()
            analysis = analysis.partial  # Dict with results for each species
            tgrid = analysis[species[0]].grid[1]
            for i, spec in enumerate(species):
                # NOTE(review): `value` is indexed by the species position,
                # which mirrors the k-grid index — confirm this matches the
                # correlator's layout (same concern as the TODO below).
                chi4s.append(np.array(analysis[spec].value[i]))

    if out_file:
        columns = (tgrid, )
        fmt = "%d"
        header = "columns=step,"
        for i, spec in enumerate(species):
            columns += (chi4s[i], )
            fmt += " %.8g"
            if corr == "self_overlap":
                header += "chi4_Qs(t, a=%.3f)_species%s," % (kwargs['a'], spec)
            elif corr == "self_intermediate_scattering":
                # TODO: fix this to actual k values.
                header += "chi4_Fs(t, k=%.3f)_species%s," % (
                    analysis[spec].grid[0][i], spec)
        header = header[:-1]  # remove final comma.

        columns = np.column_stack(columns)
        np.savetxt(out_file, columns, fmt=fmt, header=header)