Example #1
def plot_profiles_grouped_by_Q(vs_lamda=False):
    """Plot profiles of perturbations grouped by Q.

    Parameters
    ----------
    vs_lamda : bool, optional
        If True, profiles are plotted versus the ZND reaction progress
        variable; otherwise versus x.

    """
    outdir_list = [
        # 'q_2=-1.0000000000000000e+00',
        # 'q_2=-1.0000000000000000e+01',
        # 'q_2=-2.0000000000000000e+01',
        # 'q_2=-3.0000000000000000e+01',
        'q_1=010.00',
        'q_1=020.00',
        'q_1=050.00',
    ]
    vs_x = not vs_lamda

    outdir_list = [os.path.join(OUTDIR, x) for x in outdir_list]

    fig, axes = plt.subplots(3, 2, figsize=figsize)

    styles = ['-', '--', '-.']

    for i, outdir in enumerate(outdir_list):
        profile_fn = 'profiles/profile-{}.txt'.format(TIME_STEP)
        profile_fn = os.path.join(outdir, profile_fn)
        data = np.loadtxt(profile_fn)

        x = data[:, 0]
        rho = data[:, 1]
        u = data[:, 2]
        p = data[:, 3]
        lamda_1 = data[:, 4]
        lamda_2 = data[:, 5]

        comp_vals = {}
        with open(os.path.join(outdir, 'computed-values.txt')) as f:
            for line in f.readlines():
                chunks = line.split('=')
                key = chunks[0].strip()
                value = float(chunks[1].strip())
                comp_vals[key] = value

        k = comp_vals['k_1']

        znd_data = np.loadtxt(os.path.join(outdir, 'znd-solution.txt'))

        znd_rho = znd_data[:, 2]
        znd_p = znd_data[:, 4]
        znd_lamda = znd_data[:, 5]

        # dT_drho = - znd_p / znd_rho**2
        # dT_dp = 1.0 / znd_rho

        # T = dT_drho * rho + dT_dp * p

        # exponent = np.exp(-E_ACT * znd_rho / znd_p)
        # dr_drho = k * (1 - znd_lamda) * exponent * (-E_ACT/znd_p)
        # dr_dp = k * (1 - znd_lamda) * exponent * E_ACT * znd_rho / znd_p**2
        # dr_dlamda = -k * exponent

        # rate = dr_drho * rho + dr_dp * p + dr_dlamda * lamda

        scaler_rho = np.max(np.abs(rho))
        scaler_u = np.max(np.abs(u))
        scaler_p = np.max(np.abs(p))
        scaler_lamda_1 = np.max(np.abs(lamda_1))
        # scaler_T = np.max(np.abs(T))
        # scaler_rate = np.max(np.abs(rate))

        # Normalization is disabled here: the scalers computed above are
        # overridden with unity.
        scaler_rho = 1
        scaler_u = 1
        scaler_p = 1
        scaler_lamda_1 = 1
        scaler_lamda_2 = 1
        scaler_T = 1
        scaler_rate = 1

        if vs_lamda:
            x = znd_lamda

        axes[i, 0].plot(x, rho / scaler_rho, styles[0])
        axes[i, 0].plot(x, u / scaler_u, styles[1])
        axes[i, 0].plot(x, p / scaler_p, styles[2])
        axes[i, 1].plot(x, lamda_1 / scaler_lamda_1, styles[0])
        axes[i, 1].plot(x, lamda_2 / scaler_lamda_2, styles[1])
        # axes[i, 1].plot(x, T / scaler_T, styles[1])
        # axes[i, 1].plot(x, rate/scaler_rate, styles[2])

    axes[0, 0].set_ylabel(r'$Q_1 = 10$')
    axes[1, 0].set_ylabel(r'$Q_1 = 20$')
    axes[2, 0].set_ylabel(r'$Q_1 = 50$')

    if vs_x:
        axes[2, 0].set_xlabel(r'$x$')
        axes[2, 1].set_xlabel(r'$x$')
    else:
        axes[2, 0].set_xlabel(r'$\bar{\lambda}$')
        axes[2, 1].set_xlabel(r'$\bar{\lambda}$')

    if vs_x:
        axes[0, 0].set_xlim((-5, 0))
        axes[0, 0].set_ylim((-2e-9, 1.5e-9))
        axes[0, 1].set_xlim((-5, 0))
        axes[0, 1].set_ylim((-1e-10, 6e-10))

        axes[1, 0].set_xlim((-5, 0))
        axes[1, 0].set_ylim((-4e-9, 3e-9))
        axes[1, 1].set_xlim((-5, 0))
        axes[1, 1].set_ylim((-1e-10, 6e-10))

        axes[2, 0].set_xlim((-5, 0))
        axes[2, 0].set_ylim((-20e-10, 3e-10))
        axes[2, 1].set_xlim((-5, 0))
        axes[2, 1].set_ylim((-5e-11, 3e-11))

    fmt = FormatStrFormatter('%.e')

    # for i in [0, 1, 2]:
    #     for j in [0, 1]:
    #         y = axes[i, j].yaxis
    #         y.set_ticks(y.get_ticklocs()[::2])
    #         y.set_major_formatter(fmt)
    #         x = axes[i, j].xaxis
    #         x.set_ticks(x.get_ticklocs()[::2])

    fig.tight_layout(pad=0.1)

    if vs_x:
        outfile = 'eigval-perturbations-sub-super.pdf'
    else:
        outfile = 'eigval-perturbations-sub-super-vs-znd-lambda.pdf'

    savefig(outfile)
# Obtain clean data by removing outliers.
idx = np.where(theta_list != 0.0)[0]
q_clean = np.array(q_list[idx])
theta_clean = np.array(theta_list[idx])
freq_clean = np.array(freq_list[idx])

# Plot figure.
coord_x = 0.10
coord_y = 0.80
fig, (ax_1, ax_2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.canvas.mpl_connect('button_press_event', onclick_1)
fig.canvas.mpl_connect('button_press_event', onclick_2)
ax_1.plot(theta_clean, q_clean, '-')
line_1_onclicks, = ax_1.plot([], 'ro')
ax_1.set_xlabel(r'Activation energy $\theta$')
ax_1.set_ylabel(r'Heat release $q$')
ax_1.text(coord_x, coord_y, 'a', transform=ax_1.transAxes)
ax_1.grid()

ax_2.plot(freq_clean, q_clean, '-')
line_2_onclicks, = ax_2.plot([], 'ro')
ax_2.set_xlabel(r'Frequency $\alpha_{\mathrm{im}}$')
ax_2.set_ylabel(r'Heat release $q$')
ax_2.text(coord_x, coord_y, 'b', transform=ax_2.transAxes)
ax_2.grid()

fig.tight_layout(pad=0.1)

fn = 'neutral-stability.pdf'
savefig(fn)
# We accept a list of words from command line
# to generate graphs for.

WORDS = helpers.get_words()


if __name__ == "__main__":
    embeddings = helpers.load_embeddings()
    for word1 in WORDS:
        helpers.clear_figure()
        time_sims, lookups, nearests, sims = helpers.get_time_sims(embeddings, word1)

        words = list(lookups.keys())
        values = [lookups[word] for word in words]
        fitted = helpers.fit_tsne(values)
        if not len(fitted):
            print("Couldn't model word", word1)
            continue

        # draw the words onto the graph
        cmap = helpers.get_cmap(len(time_sims))
        annotations = helpers.plot_words(word1, words, fitted, cmap, sims)

        if annotations:
            helpers.plot_annotations(annotations)

        helpers.savefig("%s_annotated" % word1)
        for year, sim in time_sims.items():
            print(year, sim)

        # Plot the phase portrait, skipping the first `idx` samples, and
        # mark the initial point with a red circle.
        idx = 20
        ax = axes[2 * i + 1, j]
        ax.plot(D_smooth[idx:], dD_dt[idx:], '-', lw=lw, rasterized=True)
        ax.plot(D_smooth[0], dD_dt[0], 'ro')
        # ax.set_xlabel(r'$D$')
        # ax.set_ylabel(r'$\mathrm{d}D/\mathrm{d}t$')
        # ax.yaxis.set_label_coords(0.03, 0.3, transform=fig.transFigure)

        if dD_dt.max() > 100:
            fmt = ticker.ScalarFormatter(useOffset=True, useMathText=True)
            fmt.set_powerlimits((-2, 2))
            ax.yaxis.set_major_formatter(fmt)

        if with_inset:
            ax_inset = inset_axes(ax, width='35%', height='35%', loc=2)
            ax_inset.plot(D_smooth[i:], dD_dt[i:], '-', lw=lw, rasterized=True)
            ax_inset.plot(D_smooth[0], dD_dt[0], 'ro')
            ax_inset.set_xlim((1.8, 2.5))
            ax_inset.set_ylim((-1, 1))

            # Remove ticks and labels from the inset.
            ax_inset.tick_params(left=False,
                                 bottom=False,
                                 labelleft=False,
                                 labelbottom=False)

fig.tight_layout(pad=0.1)

filename = 'time-series-and-phase-portraits-together.pdf'
savefig(filename, dpi=300)
Example #5
# We accept a list of words from command line
# to generate graphs for.

WORDS = helpers.get_words()

if __name__ == "__main__":
    embeddings = helpers.load_embeddings()
    for word1 in WORDS:
        helpers.clear_figure()
        time_sims, lookups, nearests, sims = helpers.get_time_sims(
            embeddings, word1)

        words = list(lookups.keys())
        values = [lookups[word] for word in words]
        fitted = helpers.fit_tsne(values)
        if not len(fitted):
            print("Couldn't model word", word1)
            continue

        # draw the words onto the graph
        cmap = helpers.get_cmap(len(time_sims))
        annotations = helpers.plot_words(word1, words, fitted, cmap, sims)

        if annotations:
            helpers.plot_annotations(annotations)

        helpers.savefig("%s_annotated" % word1)
        for year, sim in time_sims.items():
            print(year, sim)
Example #6
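# Plot shaded t-SNE maps of each word's nearest neighbors over time;
# the query word itself is excluded from the plot.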
import numpy as np
import matplotlib.pyplot as plt

import helpers

WORDS = helpers.get_words()
if __name__ == "__main__":
    embeddings = helpers.load_embeddings()

    for word1 in WORDS:
        time_sims, lookups, nearests, sims = helpers.get_time_sims(
            embeddings, word1)

        helpers.clear_figure()

        # we remove word1 from our words because we just want to plot the different
        # related words
        words = list(filter(lambda word: word.split("|")[0] != word1,
                            lookups.keys()))

        values = [lookups[word] for word in words]
        fitted = helpers.fit_tsne(values)
        if not len(fitted):
            print("Couldn't model word", word1)
            continue

        cmap = helpers.get_cmap(len(time_sims))
        annotations = helpers.plot_words(word1, words, fitted, cmap, sims)

        helpers.savefig("%s_shaded" % word1)
        for year, sim in time_sims.items():
            print(year, sim)
Example #7
#!/usr/bin/env python
import os
import matplotlib.pyplot as plt

from saf.euler1d.linear import ASCIIReader

from helpers import FIGSIZE_NORMAL, savefig

dirname = 'time-series-gamma=1.1'
dirname = os.path.join('_output', dirname)

r = ASCIIReader(dirname)
t, d = r.get_time_and_detonation_velocity()

plt.figure(figsize=FIGSIZE_NORMAL)
plt.plot(t, d, '-')
plt.xlabel(r'$t$')
plt.ylabel(r'$\psi\prime$')
plt.xlim((0, 50))
plt.ylim((-3e-10, 3e-10))
plt.tight_layout(pad=0.1)

savefig('time-series-gamma=1.1.pdf')
Example #8
def znd_plot_data(x, rho, u, p, lamda):
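    """Plot ZND profiles of density, reaction progress, pressure, velocity,
    temperature, and reaction rate for several cases.

    `x` is a sequence of coordinate arrays, while `rho`, `u`, `p`, and
    `lamda` are sequences of (label, values) pairs, one entry per case.
    """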
    # Number of rows and columns in figures
    m, n = 3, 2
    E_ACT = 30.0
    X_LIM = -8

    fig, axes = plt.subplots(nrows=m, ncols=n, figsize=figsize)

    assert len(x) == len(rho)
    assert len(x) == len(u)
    assert len(x) == len(p)
    assert len(x) == len(lamda)

    # Density
    ax = axes[0, 0]
    for k, __ in enumerate(rho):
        cur_x = x[k]
        cur_rho = rho[k][1]
        cur_rho = cur_rho / cur_rho[-1]

        _znd_plot_quantity(cur_x, cur_rho, ax, k)
    ax.set_ylabel(r'$\bar{\rho}/\bar{\rho}_{\mathrm{s}}$')
    ax.set_xlabel(r'$x$')
    ax.set_xlim((X_LIM, 0))
    ax.set_yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
    ax.set_ylim((0, 1.0))

    # Progress variable
    ax = axes[0, 1]
    for k, __ in enumerate(lamda):
        cur_x = x[k]
        cur_lamda = lamda[k][1]

        _znd_plot_quantity(cur_x, cur_lamda, ax, k)
    ax.set_ylabel(r'$\bar{\lambda}$')
    ax.set_xlabel(r'$x$')
    ax.set_xlim((X_LIM, 0))
    ax.set_ylim((0, 1.0))

    # Pressure
    ax = axes[1, 0]
    for k, __ in enumerate(p):
        cur_x = x[k]
        cur_p = p[k][1]
        cur_p = cur_p / cur_p[-1]

        _znd_plot_quantity(cur_x, cur_p, ax, k)
    ax.set_ylabel(r'$\bar{p}/\bar{p}_{\mathrm{s}}$')
    ax.set_xlabel(r'$x$')
    ax.set_xlim((X_LIM, 0))
    ax.set_ylim((0.5, 1.0))

    # Velocity
    ax = axes[1, 1]
    for k, __ in enumerate(u):
        cur_x = x[k]
        cur_label = u[k][0]
        cur_u = u[k][1]
        cur_u = cur_u / cur_u[-1]

        _znd_plot_quantity(cur_x, cur_u, ax, k)
    ax.set_ylabel(r'$\bar{u}/\bar{u}_{\mathrm{s}}$')
    ax.set_xlabel(r'$x$')
    ax.set_xlim((X_LIM, 0))
    ax.set_ylim((0.5, 1.0))

    # Temperature
    ax = axes[2, 0]
    for k, __ in enumerate(p):
        cur_x = x[k]
        cur_p = p[k][1]
        cur_p = cur_p / cur_p[-1]
        cur_rho = rho[k][1]
        cur_rho = cur_rho / cur_rho[-1]
        cur_T = cur_p / cur_rho

        _znd_plot_quantity(cur_x, cur_T, ax, k)
    ax.set_ylabel(r'$\bar{T}/\bar{T}_{\mathrm{s}}$')
    ax.set_xlabel(r'$x$')
    ax.set_xlim((X_LIM, 0))
    ax.set_ylim((0.95, 2.8))

    # Reaction rate
    ax = axes[2, 1]
    for k, __ in enumerate(p):
        cur_x = x[k]
        cur_p = p[k][1]
        cur_rho = rho[k][1]
        cur_lamda = lamda[k][1]
        r = (1 - cur_lamda) * np.exp(-E_ACT * cur_rho / (cur_p))
        r = r / np.max(r)

        _znd_plot_quantity(cur_x, r, ax, k)
    ax.set_ylabel(r'$\bar{\omega}/\bar{\omega}_{\mathrm{max}}$')
    ax.set_xlabel(r'$x$')
    ax.set_xlim((X_LIM, 0))
    ax.set_ylim((0, 1.0))

    fig.tight_layout(pad=0.1, h_pad=0.55)
    savefig('znd-solutions.pdf')
        for i in range(len(chars_x)):
            line, = ax_chars.plot(chars_x[i], chars_t[i], 'k-', linewidth=lw)

        ax_chars.plot(chars_x[i_tail], chars_t[i_tail], 'k-', linewidth=2 * lw)
        ax_chars.plot(chars_x[i_head], chars_t[i_head], 'k-', linewidth=2 * lw)

        chars_2_t = [0.0, t]
        for i in range(len(chars_x)):
            chars_2_x = [chars_x[i][0], chars_x[i][0] - D * t]

        contact_x = [0.0, -D * t]
        contact_t = [0.0, t]
        ax_chars.plot(contact_x, contact_t, 'k-', linewidth=2 * lw)

        ax_chars.set_xlim((x_l, x_r))
        ax_chars.set_ylim((0, t))
        ax_chars.set_xlabel(r'$x$')
        ax_chars.set_ylabel(r'$t$')


if __name__ == '__main__':
    fig, axes = plt.subplots(nrows=3, ncols=2, figsize=FIGSIZE)

    plot_riemann_problem(1, axes[0, 0], axes[1, 0], axes[2, 0], 'a')
    plot_riemann_problem(2, axes[0, 1], axes[1, 1], axes[2, 1], 'b')

    fig.tight_layout(pad=0.1)

    savefig('riemann-problem.pdf')
#!/usr/bin/env python
r"""
Plot the carpet of the boundedness function :math:`H(\alpha)`.

"""
import numpy as np

from lib_normalmodes import CarpetAnalyzer

from helpers import FIGSIZE_NORMAL as FIGSIZE
from helpers import savefig

carpet_file = '_output/carpet.npz'

with np.load(carpet_file, 'r') as data:
    alpha_re = data['ALPHA_RE']
    alpha_im = data['ALPHA_IM']
    H = data['H']

analyzer = CarpetAnalyzer(alpha_re, alpha_im, H)
analyzer.print_minima()

fig = analyzer.get_carpet_contour_plot(FIGSIZE)
savefig('normal-modes-carpet.pdf')
plt.gca().set_prop_cycle(None)

# Plot subsonic-subsonic case.
p_sub = plt.plot(q_sub[idx_0_oscil], rate_0_oscil_sub[idx_0_oscil], '--')
color_sub = p_sub[0].get_color()

plt.plot(q_sub, rate_0_lower_sub, '--', color=color_sub)
plt.plot(q_sub, rate_0_upper_sub, '--', color=color_sub)

plt.plot(q_sub[idx_1], rates_sub[1, idx_1], '--')
plt.plot(q_sub, rates_sub[2, :], '--')
plt.plot(q_sub, rates_sub[3, :], '--')
plt.plot(q_sub[idx_4], rates_sub[4, idx_4], '--')

# Annotate modes.
ax = plt.gca()
ax.text(0.96, 0.05, '0', transform=ax.transAxes)
ax.text(0.69, 0.05, '1', transform=ax.transAxes)
ax.text(0.47, 0.05, '2', transform=ax.transAxes)
ax.text(0.30, 0.05, '3', transform=ax.transAxes)
ax.text(0.16, 0.05, '4', transform=ax.transAxes)
ax.text(0.02, 0.05, '5', transform=ax.transAxes)

plt.xlabel(r'$Q_2$')
plt.ylabel(r'$\alpha_{\mathrm{re}}$')
plt.xlim((-45, 0))
plt.ylim((-0.02, 1.22))
plt.tight_layout(pad=0.1)

savefig('eigval-re-alpha-vs-q_2-together.pdf')
Example #12
    c.e_act = e_act
    c.f = 1
    c.rho_a = 1.0
    c.p_a = 1.0
    c.ic_amplitude = 1e-10
    c.ic_type = 'znd'
    c.truncation_coef = 0.01

    return c


if __name__ == '__main__':
    data = {'eigval': None, 'simple': None}
    q = 10
    data['eigval'] = np.loadtxt(FILENAME_EIGVAL_TEMPLATE % q)
    data['simple'] = np.loadtxt(FILENAME_SIMPLE_TEMPLATE % q)

    # 3. Plot thermicity.
    plt.figure(figsize=figsize)
    ax = plt.gca()
    plt.plot(data['eigval'][:, 0], data['eigval'][:, 1], '-')
    plt.plot(data['simple'][:, 0], data['simple'][:, 1], '--')
    plt.xlim(-10, 0)
    plt.xlabel(r'$x$')
    plt.ylabel(r'Thermicity')
    plt.grid()

    plt.tight_layout(pad=0.1)

    savefig('eigval-thermicity.pdf')
Example #13
#!/usr/bin/env python
import os

from helpers import savefig
from lib_eigval_znd_solutions import znd_read_data, znd_plot_data


OUTPUT_DIR = os.path.join('_output', 'subsonic-supersonic')

Q_VALUES = [-1.0, -10.0, -20.0, -30.0]


x, rho, u, p, lamda_1, lamda_2 = znd_read_data(Q_VALUES, OUTPUT_DIR)
znd_plot_data(x, rho, u, p, lamda_1, lamda_2)

savefig('eigval-znd-solutions-subsonic-supersonic.pdf')
def dmd_synth_data():
    """Generate synthetic data and compute errors on found eigenvalues."""
    # Important constants
    AMPLITUDE = 1e-10
    NOISE_AMPLITUDE = 1e-13
    FREQ = 100

    def generate_synthetic_example(tfinal, true_eigvals):
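        """Return time samples and a noisy synthetic signal.

        The signal is a sum of growing sinusoids, one per eigenvalue:
        AMPLITUDE * exp(Re(lambda) * t) * sin(Im(lambda) * t), with
        multiplicative noise of relative amplitude NOISE_AMPLITUDE.
        """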
        t = np.linspace(0, tfinal, num=tfinal * FREQ + 1)
        y = np.zeros_like(t)

        for i in range(len(true_eigvals)):
            gamma = true_eigvals[i].real
            omega = true_eigvals[i].imag
            y = y + AMPLITUDE * np.exp(gamma * t) * np.sin(omega * t)

        y = y * (1 + NOISE_AMPLITUDE * np.random.randn(len(y)))

        return t, y

    print('First example')
    tfinal_1 = 21
    true_eigvals_1 = np.array([
        0.3 + 0.2 * 1j,
    ])
    t_1, y_1 = generate_synthetic_example(tfinal_1, true_eigvals_1)

    p_1 = Postprocessor(t_1, y_1)
    appr_eigvals_1, error_res_1, error_fit_1 = p_1.extract_stability_info()

    print('Second example')
    tfinal_2 = 21
    true_eigvals_2 = np.array([
        0.7 + 0.1 * 1j,
        0.8 + 1.57 * 1j,
        0.6 + 2.76 * 1j,
        0.5 + 3.88 * 1j,
        0.01 + 15.62 * 1j,
    ])

    t_2, y_2 = generate_synthetic_example(tfinal_2, true_eigvals_2)

    p_2 = Postprocessor(t_2, y_2)
    appr_eigvals_2, error_res_2, error_fit_2 = p_2.extract_stability_info()

    if len(sys.argv) > 1:
        L = p_2._L
        amps = p_2.amps
        spectrum_dmd = dmd.get_amplitude_spectrum(appr_eigvals_2, amps, L)
        freq_dmd, amps_dmd = spectrum_dmd

        N = len(y_2)
        dt = (t_2[-1] - t_2[0]) / (len(t_2) - 1.0)
        yhat = np.fft.rfft(y_2)
        freq_fft = np.fft.rfftfreq(N, dt)
        # We multiply by 2 because the amplitude is split between the positive
        # and negative frequency sides, but we plot only the positive side.
        # See http://www.mathworks.com/matlabcentral/answers/162846-amplitude-of-signal-after-fft-operation
        amps_fft = 2 * abs(yhat) / N

        # Rescale frequencies to be angular frequencies instead of linear.
        freq_dmd *= 2 * np.pi
        freq_fft *= 2 * np.pi

        fig, (ax_1, ax_2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)
        ax_1.ticklabel_format(style='scientific')
        ax_1.plot(t_2, y_2, '-')
        ax_1.set_xlim(0, 21)
        ax_1.set_xlabel(r'$t$')
        ax_1.set_ylabel(r'$y_2$')
        ax_1.text(0.85, 0.8, '(a)', transform=ax_1.transAxes)
        ax_2.semilogy(freq_fft, amps_fft, '-')
        ax_2.semilogy(spectrum_dmd[0], spectrum_dmd[1], 'o')
        ax_2.set_xlim(0, 16)
        ax_2.set_ylim(1e-12, None)
        ax_2.set_xlabel('Frequency')
        ax_2.set_ylabel('Amplitude')
        ax_2.set_yticks(np.logspace(-1, -11, num=6))
        ax_2.text(0.85, 0.8, '(b)', transform=ax_2.transAxes)
        fig.tight_layout(pad=0.1, w_pad=1)
        savefig('dmd-synthetic-data-spectrum.pdf')
    else:
        print('Saving results into `_assets/dmd-synthetic-data.tex`')
        cur_stdout = sys.stdout
        filename = os.path.join('_assets', 'dmd-synthetic-data.tex')
        sys.stdout = open(filename, 'w')
        print_latex_table([true_eigvals_1, true_eigvals_2],
                          [appr_eigvals_1, appr_eigvals_2])
        sys.stdout = cur_stdout

    print('Timing algorithm using Example 1')
    REPEAT = 3
    NUMBER = 2
    times_cum = timeit.Timer(p_1.extract_stability_info).repeat(repeat=REPEAT,
                                                                number=NUMBER)
    times = [x / NUMBER for x in times_cum]
    print('{} loops; best of {}: {} s per loop'.format(NUMBER, REPEAT,
                                                       min(times)))
import numpy as np
import matplotlib.pyplot as plt

import helpers

WORDS = helpers.get_words()
if __name__ == "__main__":
    embeddings = helpers.load_embeddings()

    for word1 in WORDS:
        time_sims, lookups, nearests, sims = helpers.get_time_sims(embeddings, word1)

        helpers.clear_figure()

        # we remove word1 from our words because we just want to plot the different
        # related words
        words = list(filter(lambda word: word.split("|")[0] != word1,
                            lookups.keys()))

        values = [lookups[word] for word in words]
        fitted = helpers.fit_tsne(values)
        if not len(fitted):
            print("Couldn't model word", word1)
            continue

        cmap = helpers.get_cmap(len(time_sims))
        annotations = helpers.plot_words(word1, words, fitted, cmap, sims)

        helpers.savefig("%s_shaded" % word1)
        for year, sim in time_sims.items():
            print(year, sim)

d_lin += comp_val['d_znd']

dir_nonlin = os.path.join('_output', 'nonlinear-theta=%.2f' % theta)
r_nonlin = Reader(dir_nonlin)
t_nonlin, d_nonlin = r_nonlin.get_time_and_detonation_velocity()

# Plotting
coord_x = 0.05
coord_y = 0.85
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)
ax1.plot(t_lin, d_lin, '-')
ax1.plot(t_nonlin, d_nonlin, '--')
ax1.set_xlim(50, 150)
ax1.set_ylim(1.98, 2.02)
ax1.set_xlabel(r'$t$')
ax1.set_ylabel(r'$D$')
ax1.text(coord_x, coord_y, 'a', transform=ax1.transAxes)

ax2.plot(t_lin, d_lin, '-')
ax2.plot(t_nonlin, d_nonlin, '--')
ax2.set_xlim(150, 250)
ax2.set_ylim(1.8, 2.2)
ax2.set_xlabel(r'$t$')
ax2.set_ylabel(r'$D$')
ax2.text(coord_x, coord_y, 'b', transform=ax2.transAxes)

fig.tight_layout(pad=0.1)

filename = 'linear-vs-nonlinear.pdf'
savefig(filename)

def plot_bifurcation_diagram(theta_array, bif_data, comparator):
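    """Plot local extrema of detonation velocity D versus activation energy.

    `comparator` names the type of extremum (e.g. minima or maxima) and is
    used here only in the axis label.
    """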
    plt.figure(figsize=FIGSIZE)
    for i, theta in enumerate(theta_array):
        extrema = bif_data[i]
        thetas = theta * np.ones_like(extrema)
        plt.plot(thetas, extrema, 'k.', markersize=1, rasterized=True)
        plt.ylim((1.73, 2.07))

    plt.xlabel(r'$\theta$')
    plt.ylabel(r'Local %s of $D$' % comparator)
    plt.xlim((theta_array[0], theta_array[-1]))
    plt.tight_layout(pad=0.1)


if __name__ == '__main__':
    args = parse_args()
    N12 = args.N12
    comparator = args.comparator
    order = args.order
    start_time = args.start_time
    save = args.save

    theta, D = get_bifurcation_data(N12, start_time, comparator, order)
    plot_bifurcation_diagram(theta, D, comparator)

    fn = 'bif-diag-N12=%04d-comparator=%s-order=%d-start_time=%d.pdf'
    fn = fn % (N12, comparator, order, start_time)
    savefig(fn, dpi=300)
def main():
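    """Parse command-line arguments and plot the semantic shift of the
    requested words."""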
    parser = argparse.ArgumentParser(
        description="Plot semantic shift of words")
    parser.add_argument('-w',
                        '--words',
                        nargs='+',
                        help='List of words to plot',
                        required=True)
    parser.add_argument("-n",
                        "--neighbors",
                        type=int,
                        default=15,
                        help="Number of neighbors to plot",
                        required=True)
    parser.add_argument(
        "--protocol_type",
        type=str,
        help="Whether to run test for Reichstagsprotokolle (RT) or "
             "Bundestagsprotokolle (BRD)",
        required=True)
    parser.add_argument("--model_folder",
                        type=str,
                        help="Folder where word2vec models are located",
                        required=False)

    args = parser.parse_args()
    words_to_plot = args.words
    n = args.neighbors

    if args.protocol_type == 'RT':
        embeddings = SequentialEmbedding.load(args.model_folder)

    if args.protocol_type == 'BRD':
        embeddings = SequentialEmbedding.load(args.model_folder)

    for word1 in words_to_plot:
        helpers.clear_figure()
        try:
            time_sims, lookups, nearests, sims = helpers.get_time_sims(
                embeddings, word1, topn=n)

            words = list(lookups.keys())
            values = [lookups[word] for word in words]
            fitted = helpers.fit_tsne(values)
            if not len(fitted):
                print(f"Couldn't model word {word1}")
                continue

            # draw the words onto the graph
            cmap = helpers.get_cmap(len(time_sims))
            annotations = helpers.plot_words(word1, words, fitted, cmap, sims,
                                             len(embeddings.embeds) + 1,
                                             args.protocol_type)
            print(f'Annotations:{annotations}')

            if annotations:
                helpers.plot_annotations(annotations)

            helpers.savefig(word1, args.protocol_type, n)
            for year, sim in time_sims.items():
                print(year, sim)
        except KeyError:
            print(f'{word1} is not in the embedding space.')
Example #19
i = np.argmax(freq_0)
theta_max_freq = theta_values[i]
msg = 'Maximum frequency {:{fmt}} at theta={:{fmt}}'
print(msg.format(freq_0[i], theta_max_freq, fmt=FMT))

msg = 'For theta_min={:{fmt}} rate={:{fmt}}, freq={:{fmt}}'
print(msg.format(theta_values[0], conjugate_rates[0], freq_0[0], fmt=FMT))

msg = 'For theta_max={:{fmt}} rate={:{fmt}}, freq={:{fmt}}'
print(msg.format(theta_values[-1], conjugate_rates[-1], freq_0[-1], fmt=FMT))

# Plotting.
coord_x = 0.05
coord_y = 0.70
fig, (ax_1, ax_2) = plt.subplots(2, 1, figsize=figsize)
ax_1.plot(theta_values, conjugate_rates, '-', label='Mode 0')
ax_1.set_xlim(theta_range)
ax_1.set_xlabel(r'Activation energy $\theta$')
ax_1.set_ylabel(r'Growth rate $\alpha_{\mathrm{re}}$')
ax_1.text(coord_x, coord_y, 'a', transform=ax_1.transAxes)

ax_2.plot(theta_values, freq_0, '-', label='Mode 0')
ax_2.set_xlim(theta_range)
ax_2.set_xlabel(r'Activation energy $\theta$')
ax_2.set_ylabel(r'Frequency $\alpha_{\mathrm{im}}$')
ax_2.text(coord_x, coord_y, 'b', transform=ax_2.transAxes)

fig.tight_layout(pad=0.1)

savefig('linear-spectrum.pdf')
# if len(e_list[idx]) > 0:
#     print('Number of fails: {}'.format(len(e_list[idx])))
#     print('Corresponding Q:')
#     for i in idx:
#         print('{:22.16e}'.format(q_list[i]))

# Cleaning the data by removing nonconverged cases.
idx = np.where(e_list != 0.0)[0]
q_clean = np.array(q_list[idx])
e_clean = np.array(e_list[idx])
f_clean = np.array(f_list[idx])
q_star_clean = np.array(q_star[idx])

ls_data = np.loadtxt('_output/lee-stewart-fig7-digitized-data.txt')
leestewart_q = ls_data[:, 1]
leestewart_e = ls_data[:, 0]

# Plot figure.
fig = plt.figure(figsize=figsize)
plt.semilogy(e_clean, q_star_clean, '-', label='Two-step chemistry')
plt.semilogy(leestewart_e, leestewart_q, '--', label='One-step chemistry')
plt.xlim((0, 50))
plt.ylim((0.1, 100.0))
plt.xlabel(r'Activation energy, $E$')
plt.ylabel(r'Max heat release, $Q$')
#plt.legend(loc='best')
plt.grid()
plt.tight_layout(pad=0.1)

savefig('eigval-neutral-curve-gamma=1.2.pdf')
Example #21
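# Plot a single combined t-SNE map ("chain") for all query words together.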
WORDS = helpers.get_words()

if __name__ == "__main__":
    embeddings = helpers.load_embeddings()
    all_lookups = {}
    all_sims = {}
    WORDS.sort()
    wordchain = "_".join(WORDS)

    helpers.clear_figure()
    for word1 in WORDS:
        time_sims, lookups, nearests, sims = helpers.get_time_sims(
            embeddings, word1)

        all_lookups.update(lookups)
        all_sims.update(sims)

    words = list(all_lookups.keys())
    values = [all_lookups[word] for word in words]
    fitted = helpers.fit_tsne(values)

    # draw the words onto the graph
    cmap = helpers.get_cmap(len(time_sims))

    # TODO: split the annotations up
    annotations = helpers.plot_words(WORDS, words, fitted, cmap, all_sims)
    if annotations:
        helpers.plot_annotations(annotations)

    helpers.savefig("%s_chain.png" % wordchain)
q = 4
theta = 0.95
tol = 1e-4

s = LeeStewartSolver(q, theta, tol)

alpha = 0.0290 + 0.8700j

result = s.solve_eigenvalue_problem(alpha)

print(result['eigenvalue'])

solution = result['pert']

fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=figsize)
ax1.plot(solution.x, solution.u_real, '-')
ax1.plot(solution.x, solution.lamda_real, '--')
ax1.set_xlim((-14, 0))
ax1.set_xlabel(r'$x$')
ax1.set_ylabel(r'$u\prime_\mathrm{re}, \lambda\prime_\mathrm{re}$')
ax2.plot(solution.x, solution.u_imag, '-')
ax2.plot(solution.x, solution.lamda_imag, '--')
ax2.set_xlim((-14, 0))
ax2.set_xlabel(r'$x$')
ax2.set_ylabel(r'$u\prime_\mathrm{im}, \lambda\prime_\mathrm{im}$')

fig.tight_layout(pad=0.1)

savefig('normal-modes-perturbations.pdf')
Example #23
import os

import numpy as np
import matplotlib.pyplot as plt

from helpers import FIGSIZE_NORMAL, savefig

OUTPUT_DIR = '_output'
RESULTS_FILE_0 = os.path.join(OUTPUT_DIR, 'results-gamma=1.2.txt')
LEESTEWART_FILE = 'lee-stewart-fig7-digitized-data.txt'
LEESTEWART_FILE = os.path.join(OUTPUT_DIR, LEESTEWART_FILE)

q_list_0, e_list_0, __ = np.loadtxt(RESULTS_FILE_0, unpack=True)
ls_e_0, ls_q_0 = np.loadtxt(LEESTEWART_FILE, unpack=True)

# Cleaning my data.
idx = np.where(e_list_0 != 0.0)[0]
q_clean_0 = q_list_0[idx]
e_clean_0 = e_list_0[idx]

plt.figure(figsize=FIGSIZE_NORMAL)
plt.semilogy(e_clean_0, q_clean_0, '-', label='Present work')
plt.semilogy(ls_e_0[::2], ls_q_0[::2], 's', label='Normal modes')
plt.xlim((0, 50))
plt.ylim((0.1, 100))
plt.xlabel(r'Activation energy, $E$')
plt.ylabel(r'Heat release, $Q$')
#plt.legend(loc='best')
plt.grid()
plt.tight_layout(pad=0.1)

savefig('LeeStewart-comparison.pdf')