Example no. 1
    def plot(self, count, fit_args):
        fig = plt.figure(figsize=[11, 10])
        ax = fig.add_subplot(111)
        ax.set_yscale("log")
        for i in range(len(self.labels)):
            ax.plot([s[0] for s in self.data], [s[1][i] for s in self.data],
                    label=self.labels[i])

        ax.legend()

        savefig("losses_Count_{}.png".format(count), fit_args)
        plt.close(fig)
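The bare savefig(...) call above is not matplotlib's; the class presumably relies on a module-level helper that builds the file name and saves the current figure. A minimal sketch of such a helper, assuming (this is a guess) that fit_args only decorates the file name:

import matplotlib.pyplot as plt

def savefig(filename, fit_args=None):
    # Hypothetical helper: tag the file name with the fit arguments,
    # then save the current figure.
    if fit_args:
        stem, ext = filename.rsplit('.', 1)
        filename = "{}_{}.{}".format(stem, "_".join(str(a) for a in fit_args), ext)
    plt.savefig(filename)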
Example no. 2
def main():
    if not os.path.exists("../img"):
        os.mkdir("../img", 0755)
    fs = 44100  # in Hz
    length = 0.01  # in seconds
    t = np.linspace(0, length, num=int(length * fs))
    h_function = lambda t: b * np.exp(-t / tau)
    h = h_function(t)
    plt.plot(h, t, fs, 3, "h(t)", "Signal h(t)")
    plt.savefig("../img/4.1-transfer_function.png")

    x_function = lambda t: a1 * np.cos(2 * np.pi * f1 * t) + a2 * np.sin(
        2 * np.pi * f2 * t) + a3 * np.cos(2 * np.pi * f3 * t)
    x = x_function(t)
    plt.plot(x, t, fs, 10, "x(t)", "Signal x(t)")
    yx = np.convolve(h, x)
    plt.plot(np.array([x, yx[0:len(t)]]), t, fs, 10, "yx(t)", "Signal yx(t)")
    plt.savefig("../img/4.2-sines.png")

    z_function = lambda t: A * (abs((t - T / 2) / T) < 1.0)
    z = z_function(t)
    yz = np.convolve(h, z)
    plt.plot(np.array([z, yz[0:len(t)]]), t, fs, 10, "yz(t)", "Signal yz(t)")
    plt.savefig("../img/4.3-rect.png")

    w, tw, rate, size, max = wav.readWav("../3-audio/af2.wav")
    h = h_function(tw)  # new sample rate and time vector
    yw = np.convolve(h, w)
    plt.plot(np.array([w, yw[0:len(tw)]]), tw, rate, 10, "yw(t)",
             "Signal yw(t)")
    # normalize to the original peak amplitude
    rel_max = np.max(np.abs(yw))
    yw = yw * (max / rel_max)
    wav.write("convoluted.wav", rate, yw.astype(np.dtype('i2')))
    plt.savefig("../img/4.4-audio.png")
Example no. 3
def main():
    w, tw, rate, size, max = wav.readWav("af2.wav")
    Ew = np.sum(np.square(w)) / rate
    Pw = np.sum(np.square(w)) / size
    # Get 10 random intervals and measure the power of the signal
    for i in range(10):
        # draw two distinct endpoints so the interval has nonzero length
        interval = random.sample(xrange(size + 1), 2)
        interval.sort()
        Pwi = np.sum(np.square(
            w[interval[0]:interval[1] + 1])) / (interval[1] - interval[0])
        print("Power for " + str(round(interval[0] / float(rate), 5)) +
              "s - " + str(round(interval[1] / float(rate), 5)) + "s = " +
              str(round(Pwi, 5)) + "W")
    # Plot the audio signal
    plt.plot(
        w, tw, rate, 10, "w(t)", "Signal w(t)", "Energy: " +
        str(round(Ew, 3)) + " J, Power: " + str(round(Pw, 3)) + " W")
    # Save the plot into an image file
    if not os.path.exists("img"):
        os.mkdir("img", 0755)
    plt.savefig("img/3-AudioSignal.png")
    plt.show()
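In examples 2 and 3, plt is clearly not matplotlib.pyplot: plt.plot(w, tw, rate, 10, "w(t)", "Signal w(t)", ...) takes a sample rate, a duration in seconds, an axis label and a title, and accepts a 2-D array of stacked signals. A minimal sketch of a wrapper with that signature (names and behavior are assumptions; the module would also re-export savefig and show):

import numpy as np
import matplotlib.pyplot as mpl

def plot(signal, t, rate, seconds, label, title, subtitle=None):
    # Hypothetical wrapper: draw at most the first few seconds of one
    # signal (1-D array) or of several stacked signals (2-D array).
    n = min(len(t), int(seconds * rate))
    mpl.figure()
    for s in np.atleast_2d(signal):
        mpl.plot(t[:n], s[:n])
    mpl.xlabel("t [s]")
    mpl.ylabel(label)
    mpl.title(title if subtitle is None else title + "\n" + subtitle)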
Example no. 4
ax.text(d+s/2.0,   d+s/2.0,         '$H_{z(i+1/2,j+1/2,k)}$', color = 'blue', verticalalignment = 'bottom', horizontalalignment = 'center')

# E arrows              x           y           dx      dy
ax.add_patch(plt.Arrow(d+s/2.0,     d,          d,      0.0,    width = d/2.0, edgecolor='none', facecolor = 'red'))
ax.add_patch(plt.Arrow(d,           d+s/2.0,    0,      d,      width = d/2.0, edgecolor='none', facecolor = 'red'))
ax.add_patch(plt.Arrow(d/2.0,       d/2.0,      -d/3.0, -d/3.0, width = d/2.0, edgecolor='none', facecolor = 'red'))
# E text
ax.text(d+s/2.0,    d,          '$E_{x(i+1/2,j,k)}$', color = 'red', verticalalignment = 'bottom', horizontalalignment = 'left')
ax.text(d,          d+s/2.0,    '$E_{y(i,j+1/2,k)}$', color = 'red', verticalalignment = 'bottom', horizontalalignment = 'right')
ax.text(d/3.0,      d/3.0,      '$E_{z(i,j,k+1/2)}$', color = 'red', verticalalignment = 'top',    horizontalalignment = 'left')

ax.set_xlim((-d/7.0, s+d+d/7.0))
ax.set_ylim((-d/7.0, s+d+d/7.0))

# ******************************************************************************************
# QFDTD cell
fig_qfdtd = plot.figure()
ax = fig_qfdtd.add_subplot(1,1,1, aspect = 'equal', xticks=[], yticks=[], frameon=False)

plot_cell(ax)

ax.plot([d], [d], 'o', markeredgecolor = 'green', markerfacecolor = 'green', markersize = 14)
ax.text(1.1*d, 0.9*d, r'$\psi_{i,j,k}$', color = 'green', verticalalignment = 'top', horizontalalignment = 'left')

ax.set_xlim((-d/7.0, s+d+d/7.0))
ax.set_ylim((-d/7.0, s+d+d/7.0))

for ext in ['pdf', 'svg']:
    plot.savefig(['fdtd_cell_yee.' + ext, 'fdtd_cell_qfdtd.' +  ext])
plot.show()
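plot.savefig is called here with a list of file names, which matplotlib.pyplot.savefig does not accept; the same pattern appears in examples 7 and 23, so these scripts presumably shadow it with a wrapper that saves every open figure. A sketch of such a wrapper (an assumption, not matplotlib API):

import matplotlib.pyplot as mpl

def savefig(filenames):
    # Hypothetical wrapper: save each currently open figure to the
    # corresponding file name, pairing them in creation order.
    if isinstance(filenames, str):
        filenames = [filenames]
    for num, name in zip(mpl.get_fignums(), filenames):
        mpl.figure(num).savefig(name)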
Example no. 5
def main():
    # setup output directory
    d = datetime.datetime.today()
    output_folder = "out/{}-{}-{}_{}:{}:{}".format(d.year, d.month, d.day, d.hour, d.minute, d.second)
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    # load dataset
    datasets = load_data()

    train_set_x, train_set_y = util.shared_dataset(datasets[0])
    valid_set_x, valid_set_y = util.shared_dataset(datasets[1])
    test_set_x, test_set_y = util.shared_dataset(datasets[2])

    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    test_set = (test_set_x, test_set_y)

    n_input = train_set_x.get_value(borrow=True).shape[1]
    n_output = train_set_y.get_value(borrow=True).shape[1]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches //= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(
        numpy_rng=numpy_rng,
        n_ins=n_input,
        hidden_layers_sizes=[1000, 1000, 1000],
        n_outs=n_output
    )

    predict_fn = sda.build_predict_function()

    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                                            corruption=corruption_levels[i],
                                            lr=pretrain_lr))
            print("Pre-training layer {}, epoch {}, cost ".format(i, epoch)),
            print("{}".format(numpy.mean(c)))

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.))


    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=(train_set, valid_set, test_set),
        batch_size=batch_size,
        learning_rate=finetune_lr
    )

    print '... finetuning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many minibatches before checking the network on the
    # validation set; in this case we check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if (this_validation_loss < best_validation_loss *
                            improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(
        (
            'Optimization complete with best validation score of %f %%, '
            'on iteration %i, '
            'with test performance %f %%'
        )
        % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
    )
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

    ###########
    # PREDICT #
    ###########
    y_pred = predict_fn(test_set_x.get_value(borrow=True))
    mae, mre = util.calculate_error_indexes(test_set_y, y_pred)
    print("-*-*RESULT*-*-")
    print("mae={}".format(mae))
    print("mre={}".format(mre))

    # plot
    for i in xrange(n_output):
        filename = "{}.png".format(str(i))
        plot.savefig(filename, test_set_x, y_pred, indexes=[i])
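util.shared_dataset is not shown. In the Theano stacked-autoencoder tutorial this snippet follows, the helper wraps numpy arrays in shared variables so minibatches can be sliced on the GPU; a sketch along those lines, keeping the targets in floatX since this model does regression:

import numpy
import theano

def shared_dataset(data_xy, borrow=True):
    # Wrap an (inputs, targets) pair in Theano shared variables.
    data_x, data_y = data_xy
    shared_x = theano.shared(numpy.asarray(data_x, dtype=theano.config.floatX),
                             borrow=borrow)
    shared_y = theano.shared(numpy.asarray(data_y, dtype=theano.config.floatX),
                             borrow=borrow)
    return shared_x, shared_y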
Example no. 6
axes = [ax1, ax2]

coulombic.Plot( axes, ':k', 'Coulomb')
harmonic.Plot(  axes, '-b', 'Harmonic')
sg_one.Plot(    axes, '-r', 'Super-Gaussian (m=1)')
sg_three.Plot(  axes, '-c', 'Super-Gaussian (m=3)')
cs.Plot(        axes, '-m', 'Charge distribution')


ax1.grid()
ax1.set_ylabel('Potential [Eh]')
ax2.grid()
ax2.set_ylabel('Field [a.u.]')
ax2.set_xlabel('r [Bohr]')

ax1.set_ylim((0.0, 1.25*phi0))
ax2.set_ylim((-2.0, 0.05))
plot.setp(ax1.get_xticklabels(), visible=False)


leg = ax1.legend(loc = "best")
leg.get_frame().set_alpha(0.6)
#leg.set_zorder(100)
ax2.set_zorder(-100)

plot.savefig('potential_shapes.pdf')
plot.savefig('potential_shapes.svg')
plot.show()


Example no. 7
        cooling_indices = np.where(NumericalHeating[:, index1, index2] <= 0.0)
        if (len(cooling_indices[0]) >= 1):
            ax2_eV.plot(base_potentials[cooling_indices], abs(NumericalHeating[:, index1, index2][cooling_indices]),
                        '.' + colors_and_symbols.symb_col(c))
        c += 1


ax1_eV.set_xlabel(r"$\Delta t$ [as]")
ax1_eV.set_xlim((dts[0], dts[-1]))
plt.setp(ax1_Eh.get_xticklabels(), visible=False)

ax2_eV.set_xlabel("Potential depth [Hartree]")
ax2_eV.set_xlim((base_potentials[0], base_potentials[-1]))

for ax in [ax1_eV, ax2_eV]:
    leg = ax.legend(loc = "best")
    leg.get_frame().set_alpha(0.75)
    ax.grid(True)
    ax.set_ylabel("Energy change [eV]")
    ax.set_xscale('log')
    ax.set_yscale('log')

for ax in [ax1_Eh, ax2_Eh]:
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_ylabel("Energy change [Hartree]")

for ext in ['pdf', 'svg']:
    plot.savefig(['numerical_heating_dt.' + ext, 'numerical_heating_D.' + ext])
plot.show()
Example no. 8
## Bound electron, now free
ax4.plot([0.0], [Xe_Z0_es_E[0]], 'og', ms = 14, alpha = 0.4)
ax4.plot([0.0], [Ke2+Xe_Z0_es_E[0]], 'og', ms = 14)
ar1Ke = fleches.arrow('', [0.0, Xe_Z0_es_E[0]], [0.0, Ke2+Xe_Z0_es_E[0]])    # Vertical (Ke)
ar1Ke.Plot(ax4, color = 'g')
ar2Ke = fleches.arrow('', [0.0, Ke2+Xe_Z0_es_E[0]],  [0.0+2.5, Ke2+Xe_Z0_es_E[0]]) # Horizontal (v)
ar2Ke.Plot(ax4, color = 'g', label = "$v_{e,3}$", verticalalignment = 'top')

for ax in [ax1, ax2, ax3, ax4]:
    ax.set_xlabel('r [bohr]')
    ax.set_ylabel('Energy [Hartree]')

    #ax.set_ylim((Umin, 0.05*abs(Umin)))
    ax.set_ylim((0.97*Umin, 1.05*Ke))
    #leg = ax.legend(loc = "lower right")
    ax.axhline(0.0, linestyle = ':', color = 'k')


ax1.text(0.9*r[0], 0.9*Umin, 'a)')
ax2.text(0.9*r[0], 0.9*Umin, 'b)')
ax3.text(0.9*r[0], 0.9*Umin, 'c)')
ax4.text(0.9*r[0], 0.9*Umin, 'd)')

# Remove overlapping labels
ax3.get_xticklabels()[-1].set_visible(False)
ax4.get_xticklabels()[0].set_visible(False)

for ext in ['pdf', 'svg']:
    plot.savefig('ionization_aci.' + ext)
plot.show()
Example no. 9
def main():
    # setup output directory
    d = datetime.datetime.today()
    output_folder = "out/{}-{}-{}_{}:{}:{}".format(d.year, d.month, d.day,
                                                   d.hour, d.minute, d.second)
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    # load dataset
    datasets = load_data()

    train_set_x, train_set_y = util.shared_dataset(datasets[0])
    valid_set_x, valid_set_y = util.shared_dataset(datasets[1])
    test_set_x, test_set_y = util.shared_dataset(datasets[2])

    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    test_set = (test_set_x, test_set_y)

    n_input = train_set_x.get_value(borrow=True).shape[1]
    n_output = train_set_y.get_value(borrow=True).shape[1]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches //= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(numpy_rng=numpy_rng,
              n_ins=n_input,
              hidden_layers_sizes=[1000, 1000, 1000],
              n_outs=n_output)

    predict_fn = sda.build_predict_function()

    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                                            corruption=corruption_levels[i],
                                            lr=pretrain_lr))
            print("Pre-training layer {}, epoch {}, cost ".format(i, epoch)),
            print("{}".format(numpy.mean(c)))

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))

    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=(train_set, valid_set, test_set),
        batch_size=batch_size,
        learning_rate=finetune_lr)

    print '... finetuning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many minibatches before checking the network on the
    # validation set; in this case we check every epoch

    best_validation_loss = numpy.inf
    best_iter = 0
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if (this_validation_loss <
                            best_validation_loss * improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'on iteration %i, '
           'with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))

    ###########
    # PREDICT #
    ###########
    y_pred = predict_fn(test_set_x.get_value(borrow=True))
    mae, mre = util.calculate_error_indexes(test_set_y, y_pred)
    print("-*-*RESULT*-*-")
    print("mae={}".format(mae))
    print("mre={}".format(mre))

    # plot
    for i in xrange(n_output):
        filename = "{}.png".format(str(i))
        plot.savefig(filename, test_set_x, y_pred, indexes=[i])
Example no. 10
    pool = Pool(processes=12)

    pool.map(norm, range(fres.shape[1]))

    # plot
    res_tn = exp.load('%s_%s_time_normalized_scales_mean.npy' % (strain, feat),
                      memmap='r')

    dorder = exp.load('%s_%s_order.npy' % (strain, feat))

    datplt = alys.scales_to_array(res_tn, worms_first=False, order=dorder)
    fig = fplt.plot_array(datplt,
                          title='%s %s time normalized scales mean' %
                          (strain, feat))
    fplt.savefig(fig,
                 exp.figname('%s_%s_time_normalized_scales_mean.png' %
                             (strain, feat)),
                 width=2500)

    datplt = alys.scales_to_array(res_tn, worms_first=True, order=dorder)
    fig = fplt.plot_array(datplt,
                          title='%s %s time normalized scales mean' %
                          (strain, feat))
    fplt.savefig(fig,
                 exp.figname('%s_%s_time_normalized_scales_mean_2.png' %
                             (strain, feat)),
                 width=2500)

    #### Test on single worm
    #
    #from timer import timeit;
    #
Example no. 11
	def save(self):
		plot.savefig(self.figpath.joinpath('%05d.png' % rounds_played))
Example no. 12
            ax_U_Eh.set_xlim(xlim)

if options.vmin is not None or options.vmax is not None:
    if plot_V:
        ylim = list(ax_V_Eh.get_ylim())
        if options.vmin is not None:
            ylim[0] = options.vmin
        if options.vmax is not None:
            ylim[1] = options.vmax
        ax_V_Eh.set_ylim(ylim)

if options.umin is not None or options.umax is not None:
    if plot_U:
        ylim = list(ax_U_eV.get_ylim())
        if options.umin is not None:
            ylim[0] = options.umin
        if options.umax is not None:
            ylim[1] = options.umax
        ax_U_Eh.set_ylim(ylim)

if plot_V:
    leg = ax_V_Eh.legend(loc="best")
    leg.get_frame().set_alpha(0.4)
# if plot_U:
#     leg = ax_U_Eh.legend(loc="best")
#     leg.get_frame().set_alpha(0.4)

for ext in ["pdf", "svg"]:
    plot.savefig("potential_landscape." + ext)
plot.show()
Example no. 13
ar1Ke = fleches.arrow("K_e'", [0.0, 0.0], [0.0, Kep]) # Vertical (Ke)
ar2Ke = fleches.arrow("K_e'", [0.0, Kep*1.3], [5.0, Kep*1.3]) # Horizontal (v)
ar1Ke.Plot(ax2, color = 'b', label = r"$K_e'$   .", horizontalalignment = 'right', bidirectional = True)
ar2Ke.Plot(ax2, color = 'b', verticalalignment = 'bottom')
ax2.plot([0.0], [Kep*1.3], 'ob', ms = 14)

# Bound electron
ax2.plot([0.0], [-Xe_Z0_Ip], 'og', ms = 14, alpha = 0.4)
ax2.plot([0.0], [Kep], 'og', ms = 14)
ar2a = fleches.arrow("", [0.0, 0.0], [0.0, Kep]) # Vertical
ar2a.Plot(ax2, color = 'g', label = r".$K_e'$", horizontalalignment = 'left', bidirectional = True)
ar2b = fleches.arrow("K_e'", [0.0, Kep], [5.0, Kep]) # Horizontal (v)
ar2b.Plot(ax2, color = 'g', verticalalignment = 'bottom')

for ax in [ax1, ax2]:
    ax.set_xlabel('r [bohr]')
    ax.set_ylabel('Energy [Hartree]')

    #ax.set_ylim((Umin, 0.05*abs(Umin)))
    ax.set_ylim((0.97*Umin, 1.05*Ke))
    #leg = ax.legend(loc = "lower right")
    ax.axhline(0.0, linestyle = ':', color = 'k')


ax1.text(0.9*r[0], 0.9*Umin, 'a)')
ax2.text(0.9*r[0], 0.9*Umin, 'b)')

for ext in ['pdf', 'svg']:
    plot.savefig('ionization_impact.' + ext)
plot.show()
Example no. 14
def test_SdA(state_file=None, output_folder=None):
    # load data
    datasets = load_data(r=2, d=1)

    train_set_x, train_set_y = util.shared_dataset(datasets[0])
    valid_set_x, valid_set_y = util.shared_dataset(datasets[1])
    test_set_x, test_set_y = util.shared_dataset(datasets[2])

    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    test_set = (test_set_x, test_set_y)

    n_input = train_set_x.get_value(borrow=True).shape[1]
    n_output = train_set_y.get_value(borrow=True).shape[1]

    # prepare output folder
    if output_folder is None:
        d = datetime.datetime.today()
        output_folder = "out/{0:04d}{1:02d}{2:02d}_{3:02d}{4:02d}{5:02d}".format(
            d.year, d.month, d.day, d.hour, d.minute, d.second)
        if not os.path.isdir(output_folder):
            os.makedirs(output_folder)

    # instantiate TestBed
    if state_file is None:
        bed = TestBed.new(n_input, [400, 400, 400], n_output, output_folder)
    else:
        bed = TestBed.load(state_file)

    ######################
    # PRETRAIN THE MODEL #
    ######################
    bed.pretrain(test_set_x, epochs=1, learning_rate=0.1, batch_size=1)

    ########################
    # FINETUNING THE MODEL #
    ########################
    # fine-tune repeatedly with a decreasing learning rate
    for lr in [0.1, 0.01, 0.001, 0.0001, 0.00001]:
        bed.finetune(train_set,
                     valid_set,
                     test_set,
                     epochs=1000,
                     learning_rate=lr,
                     batch_size=1)

    ###########
    # PREDICT #
    ###########
    y_pred = bed.predict(test_set_x.get_value(borrow=True))

    mae, mre, rmse = util.calculate_error_indexes(
        test_set_y.get_value(borrow=True), y_pred)
    print("-*-*RESULT*-*-")
    print("mae={}".format(mae))
    print("mre={}".format(mre))
    print("rmse={}".format(rmse))

    # plot
    os.chdir(output_folder)
    cut = min(10 * 144, test_set_x.get_value(borrow=True).shape[0])
    plot_y = test_set_x.get_value(borrow=True)[:cut]
    plot_y_pred = y_pred[:cut]
    for i in xrange(n_output):
        filename = "{}.png".format(str(i))
        plot.savefig(filename, plot_y, plot_y_pred, indexes=[i])
Example no. 15
X,Y = np.meshgrid(y, y)
print "--> The warning 'UserWarning: Images are not supported on non-linear axes.' can be ignored."
print "--> Even though the axes are log-scaled, the rectangle with the colormap should be shown on linear scale."
print ""
#im = ax_eV.imshow(Z, cmap=cm.cool, alpha=.9, interpolation='bilinear', extent=extent)
#im = ax_eV.pcolormesh(x, y, Z, cmap=cm.cool, shading='gouraud')
im = ax_eV.pcolorfast(x, y, Z, cmap=cm.cool, alpha=0.7)
#im = ax_eV.pcolor(x, y, Z, cmap=cm.cool)
draw_text_box(ax_eV, "VUV",
              1.0e11, 1.0e15, 6.2, 17,
              color = 'none')
draw_text_box(ax_eV, "XUV",
              1.0e11, 1.0e15, 47, 124,
              color = 'none')

draw_text_box(ax_eV, "XFEL",
              1.0e11, 1.0e17, 250, 1000,
              color = 'blue')



ax_eV.set_xlim((I_min, I_max))
ax_eV.set_ylim((eV_min, eV_max))
ax_wl.set_ylim((eV_to_nm(eV_min), eV_to_nm(eV_max)))


for ext in ['pdf', 'svg']:
    plot.savefig('regimes.' + ext)
plot.show()

Example no. 16
                linestyle = colors_and_symbols.symbol(ei+5),
                color = colors_and_symbols.color(ei),
                label = Xe_Z0_es_n[ei])

# 3 electrons
al = 0.09 # arrow length
e1_xy = [0.25*r[0],  0.8*Umax]
e2_xy = [0.20*r[-1], 0.25*Umax]
e3_xy = [0.75*r[0],  0.1*Umax]
ar_e1 = fleches.arrow('e1', e1_xy, [e1_xy[0], e1_xy[1]    +al*dU])
ar_e2 = fleches.arrow('e2', e2_xy, [e2_xy[0], e2_xy[1]-2.0*al*dU])
ar_e3 = fleches.arrow('e3', e3_xy, [e3_xy[0], e3_xy[1]    +al*dU])
ar_e1.Plot(ax1, color = 'b')
ar_e2.Plot(ax1, color = 'b')
ar_e3.Plot(ax1, color = 'b')
ax1.plot([e1_xy[0]], [e1_xy[1]], 'ob', ms = 14)
ax1.plot([e2_xy[0]], [e2_xy[1]], 'ob', ms = 14)
ax1.plot([e3_xy[0]], [e3_xy[1]], 'ob', ms = 14)

ax1.set_xlabel('r [bohr]')
ax1.set_ylabel('Energy [Hartree]')

#leg = ax1.legend(loc = "lower right")
ax1.axhline(0.0, linestyle = ':', color = 'k')

ax1.set_ylim((0.98*Umin,1.02*Umax))

for ext in ['pdf', 'svg']:
    plot.savefig('heating_mbr.' + ext)
plot.show()
Example no. 17
            for d, daugther in enumerate(daugthers):
                alpha = 1.0 - (self.level) / (max_level+2.0)
                R = daugther.level / (max_level+2)
                #print 'd', d, '  level =', self.level, '  alpha = ', alpha, '  red =', R
                if (particles.is_inside(daugther)):
                    ax.plot([daugther.mins[0], daugther.maxs[0]], daugther.pos[1]*np.ones(2), '-', color = (R, 0, 0), linewidth = linewidth, alpha = alpha)
                    ax.plot(daugther.pos[0]*np.ones(2), [daugther.mins[1], daugther.maxs[1]], '-', color = (R, 0, 0), linewidth = linewidth, alpha = alpha)
                if (self.level < max_level and particles.is_inside(daugther)):
                    daugther.draw_daugthers(ax, particles)


x = r * np.cos(theta)
y = r * np.sin(theta)

particles = Part(x, y)
cell = Cell(0.0, 0.0, (2.05*abs(x).max(), 2.05*abs(y).max()))

fig = plot.figure()
ax = fig.add_subplot(1,1,1, xticks=[], yticks=[])
#ax = fig.add_subplot(1,1,1)

ax.plot(x[0:N/2], y[0:N/2], 'ob', ms = 14)
ax.plot(x[N/2:], y[N/2:],   'om', ms = 7)
cell.draw_quadtree(ax, particles)
ax.set_xlim(cell.mins[0], cell.maxs[0])
ax.set_ylim(cell.mins[1], cell.maxs[1])

plot.savefig('quadtree.svg')
plot.savefig('quadtree.pdf')
plot.show()
Example no. 18
ax2.plot(r, U2)
ax2.plot([-r0, r0], [Uep, Uep], '-m', label = '5p')

# Photon energy (defined before first use in this excerpt)
gamma = 0.5*Xe_Z0_Ip

# Electron
ax2.plot([0.0], [Uep], 'og', ms = 14, alpha = 0.6)
ax2.plot([0.0], [Uep+gamma], 'og', ms = 14)

# Photon
ar_e1 = fleches.arrow('e1', [0.0, Uep], [0.0, Uep+gamma])
ar_e1.Plot(ax2, color = 'b', alpha = 0.5)
photon(ax2, [r[0]+lr*0.1, Uep], [0.0, Uep], Xe_Z0_Ip/5.0)


for ax in [ax1, ax2]:
    ax.set_xlabel('r [bohr]')
    ax.set_ylabel('Energy [Hartree]')

    #leg = ax.legend(loc = "lower right")
    ax.axhline(0.0, linestyle = ':', color = 'k')

ax1.set_ylim((0.98*Umin,1.02*Umax))

# Remove overlapping labels
ax1.get_xticklabels()[-1].set_visible(False)
ax2.get_xticklabels()[0].set_visible(False)

for ext in ['pdf', 'svg']:
    plot.savefig('heating_barrier_sup.' + ext)
plot.show()
Example no. 19
import plot as fplt

strain = 'N2'
feat = 'roam'

nbins = 2**13

save_fig = True

data = exp.load_data(strain)

### Bin data

sbins = exp.stage_bins(data, nbins=nbins)

d = getattr(data, feat)
tn = exp.bin_data(d, sbins)

exp.save(tn, '%s_%s_time_normalized.npy' % (strain, feat))

#fig = plt.figure(1); plt.clf();
#plt.imshow(tn, aspect = 'auto');
#plt.tight_layout();
#plt.title('%s %s time normalized' % (strain, feat));
#if save_fig:
#  fig.savefig(exp.figname('%s_%s_time_normalized.png'% (strain, feat)));

fig = fplt.plot_array(tn, title='%s %s time normalized' % (strain, feat))
fplt.savefig(fig,
             exp.figname('%s_%s_time_normalized.png' % (strain, feat)),
             width=2500)
Example no. 20
# Photon energy
ar1 = fleches.arrow('GammaE', [0.0, -Xe_Z0_Ip], [0.0, GammaE-Xe_Z0_Ip])
ar1.Plot(ax1, color = 'r', label = r'$E_\gamma$', horizontalalignment = 'left')
ax1.plot([0.0], [-Xe_Z0_Ip], 'ob', ms = 14, alpha = 0.6)

# New electron velocity
ar2 = fleches.arrow('N_e', [0.0, GammaE-Xe_Z0_Ip], [5.0, GammaE-Xe_Z0_Ip])
ar2.Plot(ax1, color = 'b', label = '$v_e$', horizontalalignment = 'center', verticalalignment = 'bottom')
ax1.plot([0.0], [GammaE-Xe_Z0_Ip], 'ob', ms = 14)

# Delta E
x = -1.0
ar3 = fleches.arrow('D_e', [x, 0.0], [x, GammaE-Xe_Z0_Ip])
ar3.Plot(ax1, color = 'g', label = r'$\Delta E.$', horizontalalignment = 'right', bidirectional = True)

# Photon
#photon(ax1, [r[0]+lr*0.1, -Xe_Z0_Ip/2.0], [0.0, -Xe_Z0_Ip/2.0], Xe_Z0_Ip/5.0)
photon(ax1, [r[0]+lr*0.1, -Xe_Z0_Ip], [0.0, -Xe_Z0_Ip], Xe_Z0_Ip/5.0)

ax1.set_xlim((r[0], r[-1]))
ax1.set_ylim((Umin, Umax))

ax1.set_xlabel('r [bohr]')
ax1.set_ylabel('Energy [Hartree]')

for ext in ['pdf', 'svg']:
    plot.savefig('ionization_single.' + ext)
plot.show()

Example no. 21
def draw_histos(vis=False, filepath='/output/evalSave/'):
    print(filepath)
    if not os.path.exists(filepath):
        print('path created')
        os.makedirs(filepath)
    
    with open(os.path.join(filepath, 'eval.json'), 'r') as j:
        data = json.load(j)
        
    # build precision-recall curves
    prec_03, rec_03 = eval_plot.sort_prec_rec(data['evaluation']['prec']['0.3'], data['evaluation']['rec']['0.3'])
    eval_plot.plot_prec_rec(rec_03, prec_03, 'Thrs: 0.3', color=(1, 0, 1))
        
    prec_05, rec_05 = eval_plot.sort_prec_rec(data['evaluation']['prec']['0.5'], data['evaluation']['rec']['0.5'])
    eval_plot.plot_prec_rec(rec_05, prec_05, 'Thrs: 0.5', color=(0, 1, 0))
    
    # build smoothed precision-recall curves
    # smooth PR-curve as described in http://cs229.stanford.edu/section/evaluation_metrics.pdf#
    smoothed_prec_03 = [max(prec_03[idx:]) for idx, _ in enumerate(prec_03)]
    smoothed_prec_05 = [max(prec_05[idx:]) for idx, _ in enumerate(prec_05)]
    
    eval_plot.plot_prec_rec(rec_03, smoothed_prec_03, 'Thrs_smoothed: 0.3', color=(1, 0, 1), linestyle="--")
    eval_plot.plot_prec_rec(rec_05, smoothed_prec_05, 'Thrs_smoothed: 0.5', color=(0, 1, 0), linestyle="--")
    
    #auc_03 = np.trapz(smoothed_prec_03, rec_03)
    #auc_05 = np.trapz(smoothed_prec_05, rec_05)

    auc_03 = np.trapz(prec_03, rec_03)
    auc_05 = np.trapz(prec_05, rec_05)
    
    title = "conf. stride: {:0.2f}, max. conf.: {:0.4f}, AUC_03: {:0.2f}, AUC_05: {:0.2f}".\
                                                format(data['confidence stride'], data['max_conf'], auc_03, auc_05)

    eval_plot.config(data['name'], title=title)
 
    
    eval_plot.savefig(os.path.join(filepath, (data['name'] + '.svg')))
    if vis:
        eval_plot.show()
    else:
        eval_plot.clearfig()

    # build mean iou - recall curves        
    postfix = ['', '_low', '_mid', '_high']
    
    for p in postfix:        
        eval_plot.plot_prec_rec(data['evaluation']['rec' + p]['0.3'], data['evaluation']['m_iou' + p]['0.3'], 'halt au', color=(1, 0, 1))
                
        eval_plot.plot_prec_rec(data['evaluation']['rec' + p]['0.5'], data['evaluation']['m_iou' + p]['0.5'], 'halt au', color=(0, 1, 0))
        
        title = "conf. stride: {:0.2f}, max. conf.: {:0.4f},\nAUC_03: {:0.2f}, AUC_05: {:0.2f}".\
                                                    format(data['confidence stride'], data['max_conf'], auc_03, auc_05)
               
        eval_plot.config(data['name'], title=title)
        
        eval_plot.savefig(os.path.join(filepath, 'm_iou' + p + '.svg'))
        if vis:
            eval_plot.show()
        else:
            eval_plot.clearfig()
    
    classes = ["Background", "Ferry", "Buoy", "Vessel/ship", "Speed boat", "Boat", "Kayak", "Sail boat", "Swimming person", "Flying bird/plane", "Other"]        
    
    num_classes = data['evaluation']['conf_mat']["0.3"][0][1]
    cm = np.array(data['evaluation']['conf_mat']["0.3"][0][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_03_05.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.3"][1][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_03_075.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.5"][0][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_05_05.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
    
    cm = np.array(data['evaluation']['conf_mat']["0.5"][1][0]).reshape((num_classes, num_classes))
    name = os.path.join(filepath, "confusion_matrix_05_075.svg")
    eval_plot.plot_confusion_matrix(cm, classes, name)
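eval_plot.sort_prec_rec is project code; judging from its use, it must return the precision/recall pairs ordered by increasing recall, which both the plotting and the np.trapz integration above require. A minimal sketch, with the helper itself an assumption:

def sort_prec_rec(prec, rec):
    # Hypothetical helper: order the (recall, precision) pairs by recall
    # so the PR curve can be drawn and integrated left to right.
    pairs = sorted(zip(rec, prec))
    rec_sorted = [r for r, _ in pairs]
    prec_sorted = [p for _, p in pairs]
    return prec_sorted, rec_sorted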
Example no. 22
# Threshold
ax1.axhline(0.0, color="k", ls="-", alpha=0.5)

# U(r)
ax1.plot(r, U, "--k", alpha=0.5, label="Unperturbed ion")

# Laser
ax1.plot(r, laser, "-r", label="Laser")

# U(r) + laser
ax1.plot(r, Ubent, "-k", label="Effective")

# Electron
ax1.plot([0.0], [-Xe_Z0_Ip], "ob", ms=14)
ar1 = fleches.arrow("Tunnel", [0.0, -Xe_Z0_Ip], [0.9 * r[-1], -Xe_Z0_Ip])
ar1.Plot(ax1, color="g")

ax1.set_xlim((r[0], r[-1]))
ax1.set_ylim((0.95 * Umin, Umax))

ax1.set_xlabel("r [bohr]")
ax1.set_ylabel("Energy [Hartree]")

leg = ax1.legend(loc="best")
leg.get_frame().set_alpha(0.75)

for ext in ["pdf", "svg"]:
    plot.savefig("ionization_tunnel." + ext)
plot.show()
Example no. 23
ax2.add_patch(mpatches.Circle(auger_1_xy, r_e, color='blue', ec="none", alpha = 1.0))

# Arrow for inner shell filling
arrow_end   = (auger_1_xy[0], auger_1_xy[1])
arrow_start = (auger_2_xy[0], auger_2_xy[1])
ax2.annotate('', xycoords='data',
             xy = arrow_end, xytext = arrow_start, textcoords='data',
             size = 20, arrowprops=dict(arrowstyle="simple",
                                        fc="g", ec="none",
                                        connectionstyle="arc3,rad=-0.3"))

# Outer shell electron leaving
auger_3_xy_new = np.empty(2)
angle = np.arctan2(auger_3_xy[1], auger_3_xy[0])
auger_3_xy_new[0] = 1.4 * el_r.max() * np.cos(angle)
auger_3_xy_new[1] = 1.4 * el_r.max() * np.sin(angle)
ax2.add_patch(mpatches.Circle(auger_3_xy_new, r_e, color='blue', ec="none", alpha=1.0))

# Arrow showing it
ar = arrow('AuE', auger_3_xy, auger_3_xy_new)
ar.Plot(ax2, color = 'g')

# *******************************************************************************************
for ax in [ax1, ax2]:
    ax.set_xlim((-1.1*el_r.max(), 1.1*el_r.max()))
    ax.set_ylim((-1.1*el_r.max(), 1.1*el_r.max()))

for ext in ['pdf', 'svg']:
    plot.savefig(['auger_step_1.' + ext, 'auger_step_2.' +  ext])
plot.show()