Example #1
def main():
    ticker = sys.argv[1]
    start_date = sys.argv[2]
    end_date = sys.argv[3]

    ticker_data = data.get(ticker, start_date, end_date)
    plot.plot_data(ticker_data)
Example #2
def plot_beam(dirname, input_beam, Rho, Phi, ref_output_fluence):
    filename = lambda name: os.path.join(dirname, name)
    
    if len(Rho) > 1:
        vfluence = np.vectorize(input_beam.fluence)
        ref_input_fluence = vfluence(*np.meshgrid(Rho, Phi)).T
        norm_input_fluence = ref_input_fluence / input_beam.ref_fluence
        norm_output_fluence = ref_output_fluence / input_beam.ref_fluence
        max_output_fluence = np.amax(norm_output_fluence)
        
        n_ref = -1
        for n, phi in enumerate(Phi):
            if n_ref < 0 or abs(phi - input_beam.phi_ref) < abs(Phi[n_ref] - input_beam.phi_ref):
                n_ref = n
        rholim = (Rho[0], Rho[-1])
        plot.plot_data(filename("fluences"), "Input and Output Fluence", ((Rho,)*2, None, rholim, rho_label), ((norm_input_fluence[:, n_ref], norm_output_fluence[:, n_ref]), None, None, fluence_rel_label), ("input beam", "output beam"))
        plot.plot_data(filename("fluences_norm"), "Normalized Input and Output Fluence", ((Rho,)*2, None, rholim, rho_label), ((norm_input_fluence[:, n_ref], norm_output_fluence[:, n_ref] / max_output_fluence), None, None, fluence_norm_rel_label), ("input beam", "output beam"))
        
        if len(Phi) > 1:
            FR, RF = np.meshgrid(Phi, Rho)
            XY, YX = RF * np.cos(FR), RF * np.sin(FR)
            stride_rho = max(len(Rho) // params.out_count_rho, 1)
            stride_phi = max(len(Phi) // params.out_count_phi, 1)
            plot.plot_projection(filename("fluence_in"), "Input Fluence", (XY, None, x_label), (YX, None, y_label), (norm_input_fluence, None, fluence_rel_label), (30, -60), (stride_rho, stride_phi))
            plot.plot_projection(filename("fluence_out"), "Output Fluence", (XY, None, x_label), (YX, None, y_label), (norm_output_fluence, None, fluence_rel_label), (30, -60), (stride_rho, stride_phi))
Example #3
def plot_inversion(dirname, inv):
    filename = lambda name: os.path.join(dirname, name)
    
    T = inv.T
    inversion = inv.inversion
    tlim = (T[0], T[-1])
    plot.plot_data(filename("inversion_evo"), "Population Inversion Evolution", (T, None, tlim, t_pump_label), (inversion, None, None, inversion_abs_label))
Example #4
def plot_train(dirname, input_beam, active_medium, output_photon_counts):
    filename = lambda name: os.path.join(dirname, name)
    
    pulse_count = len(output_photon_counts)
    pulse_nums = np.arange(1, pulse_count + 1)
    nlim = (pulse_nums[0] - 1, pulse_nums[-1] + 1)
    extra_args = dict(style="o", vlines=True, grid="y") if pulse_count <= 32 else {}
    input_photon_count = input_beam.fluence_integral(active_medium.radius)
    plot.plot_data(filename("pulse_energy_gain"), "Pulse Energy Gain", (pulse_nums, None, nlim, i_label), (output_photon_counts/input_photon_count, None, None, energy_rel_label), **extra_args)
Example #5
def test_plot_data_title(mock_plt):
    x = np.arange(0, 5, 0.1)
    y = np.sin(x)
    my_plot.plot_data(x, y, "my title")

    # Assert plt.title has been called with expected title arg
    mock_plt.title.assert_called_once_with("my title")

    # Assert plt.figure got called
    assert mock_plt.figure.called
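The mock_plt fixture used above is not shown in the snippet; a minimal sketch of how it could be provided with pytest and unittest.mock, assuming my_plot imports matplotlib.pyplot as plt (hypothetical module layout):

import pytest
from unittest import mock

import my_plot  # module under test (assumed name)

@pytest.fixture
def mock_plt():
    # Swap the pyplot object used inside my_plot for a MagicMock so plot_data
    # draws nothing and the test can inspect the plt.figure()/plt.title() calls.
    with mock.patch("my_plot.plt") as mocked:
        yield mocked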
Example #6
def main():
    channel_names = ['24', '25', '26', '27', '28', '29']
    data_channels = list()

    input('start recording by pressing any button, stop recording by using keyboard interrupt ctrl+c: ')
    try:
        while True:
            data_channels.append(read())
    except KeyboardInterrupt:
        plot_data([channel_names, np.transpose(np.array(data_channels))])
Example #7
def create_chart(results, x_label, y_label, filename):
    data_to_plot = []
    if config.CHECK_NO_MIGRATION:
        data_to_plot.append({'label': 'No Migration', 'data': results[0]})
    if config.CHECK_MODEL_1:
        data_to_plot.append({'label': 'Model 1', 'data': results[1]})
    if config.CHECK_MODEL_2:
        data_to_plot.append({'label': 'Model 2', 'data': results[2]})
    if config.CHECK_MODEL_3:
        data_to_plot.append({'label': 'Model 3', 'data': results[3]})
    plot_data(data_to_plot, x_label, y_label, config.RESULTS_DIR + filename)
Example #8
def simulate_seperable(data_size):
    """Simulate learning a completely seperable data set."""
    data = generate_sphere_data(10000, margin=0)
    train_data, test_data = split_list(data, 0.75)
    w = train(train_data, max_iter=500, r=0.01)
    error = test(test_data, w)
    status(train_data, test_data, error)

    plot_data(data)
    plot_w(data, w)
    show()
Example #9
def simulate_increasing(data_size, margin=0.3, max_iter=100, learning_rate=0.1,
                        steps=5, start=None, end=None):
    """Simulate learning an increasing training data set.

    Generates a non-separable data set, and trains on an increasing training
    set, then tests and plots.

    start: Initial (first step) training data set size.
    end: Final (last step) training data set size.
    """
    data = generate_sphere_data(data_size, margin=margin)
    train_data, test_data = split_list(data, 0.75)

    # Initialize start/end sizes if not given.
    start = len(train_data)/steps if start is None else start
    end = len(train_data) if end is None else end

    w_colors = ['b', 'c', 'm', 'y', 'k']  # w vector (line) graph color.
    w_gs = []  # w plot graphs.
    sizes = []  # Training data set sizes.
    success = []  # Success rates according to training data set sizes.
    for i in xrange(steps):
        # Increase training data size according to iteration.
        size = start + i*end/steps
        current_train_data = train_data[:size]

        w = train(current_train_data, max_iter=max_iter, r=learning_rate)
        error = test(test_data, w)

        status(current_train_data, test_data, error)
        print

        # Record size-success statistics.
        sizes.append(size)
        success.append(100 - error)

        # Plot decision boundary.
        w_color = w_colors[i] if i < len(w_colors) else w_colors[-1]
        figure(0)
        g, = plot_w(current_train_data, w, color=w_color)
        w_gs.append(g)

    figure(0).suptitle('Test data size: %d\nMaximum iterations: %d' % (len(test_data), max_iter))
    plot_w_legend(w_gs, sizes)
    plot_data(data)

    figure(1).suptitle('Success rate according to training set size.')
    plot_success_per_size(sizes, success)

    show()
Example #10
def simulate():
    """
    run simulation, save and plot results
    """
    simul_data = run_simulation()
    np.savetxt(outfile, simul_data.getArray(), delimiter='\t', newline='\n')

    # plot data
    plot_settings = {
        'title': "Hexagonal Lattice",
        'filename': outfile,
        'f': 1.0
    }
    # plot_settings = {'title': r"FCC Lattice with $\beta=64.55^{\circ}$", 'filename': outfile, 'f': 1.0}
    plot_data(plot_settings)
Example #11
def main():
    all_judgment_filenames = glob.glob(PATH)

    if not os.path.isfile(FILENAMES_FOR_YEAR):
        get_filenames_with_judgments_for_year(all_judgment_filenames)

    with open(FILENAMES_FOR_YEAR) as file:
        filenames_with_judgments_for_year = json.load(file)
        judgments = itertools.chain.from_iterable(
            map(get_judgments, filenames_with_judgments_for_year))

    judgment_texts = (clear_text(judgment['textContent'])
                      for judgment in judgments)

    if not os.path.isfile(WORD_COUNTS_FILENAME):
        counter = Counter(generate_words(judgment_texts))
        sorted_counter = Counter(
            dict(
                sorted(counter.items(),
                       key=operator.itemgetter(1),
                       reverse=True)))
        with open(WORD_COUNTS_FILENAME, 'w') as file:
            json.dump(sorted_counter, file)

    with open(WORD_COUNTS_FILENAME) as file:
        counter = json.load(file)

    plot_data(counter)
    dict_words = dictionary_words()
    diff = set(counter) - set(dict_words)

    if not os.path.isfile(HUGE_COUNTER_FILENAME):
        if not os.path.isfile(ALL_JUDGMENTS_FILENAME):
            create_file_with_all_judgments(all_judgment_filenames)
        counts_from_all_texts = create_counter_from_all_texts()
        with open(HUGE_COUNTER_FILENAME, 'w') as file:
            json.dump(counts_from_all_texts, file)

    with open(HUGE_COUNTER_FILENAME) as file:
        counts_from_all_texts = json.load(file)

    spell_corrector = SpellCorrector(counts_from_all_texts)

    with ProcessPoolExecutor(max_workers=6) as pool, open(
            CORRECTIONS, 'w', encoding='utf-8') as file:
        for word, corrected in zip(diff, pool.map(spell_corrector.correct,
                                                  diff)):
            print(word, '->', corrected, file=file)
Example #12
def get_modgdf(complex_spec, complex_spec_time_scaled):
    """Get Modified Group-Delay Feature
    """
    mag_spec = get_mag_spec(complex_spec)
    cepstrally_smoothed_mag_spec = cepstrally_smoothing(mag_spec)
    plot_data(cepstrally_smoothed_mag_spec, "cepstrally_smoothed_mag_spec.png",
              "cepstrally_smoothed_mag_spec")

    real_spec = get_real_spec(complex_spec)
    imag_spec = get_imag_spec(complex_spec)
    real_spec_time_scaled = get_real_spec(complex_spec_time_scaled)
    imag_spec_time_scaled = get_imag_spec(complex_spec_time_scaled)

    __divided = real_spec * real_spec_time_scaled \
            + imag_spec * imag_spec_time_scaled
    __tao = __divided / (cepstrally_smoothed_mag_spec**(2. * GAMMA))
    __abs_tao = np.absolute(__tao)
    __sign = 2. * (__tao == __abs_tao).astype(float) - 1.
    return dct(__sign * (__abs_tao**ALPHA), type=2, axis=1, norm='ortho')
Example #13
def create_chart (results, x_label, y_label, filename):
  data_to_plot = []
  if config.CHECK_NO_MIGRATION:
    data_to_plot.append({'label': 'No Migration', 'data': results[0]})
  if config.CHECK_SEMI_1_BF:
    data_to_plot.append({'label': 'SEMI-1 BF', 'data': results[1]})
  if config.CHECK_SEMI_1_FF:
    data_to_plot.append({'label': 'SEMI-1 FF', 'data': results[2]})
  if config.CHECK_SEMI_1_WF:
    data_to_plot.append({'label': 'SEMI-1 WF', 'data': results[3]})
  if config.CHECK_SEMI_2_BF:
    data_to_plot.append({'label': 'SEMI-2 BF', 'data': results[4]})
  if config.CHECK_SEMI_2_FF:
    data_to_plot.append({'label': 'SEMI-2 FF', 'data': results[5]})
  if config.CHECK_SEMI_2_WF: 
    data_to_plot.append({'label': 'SEMI-2 WF', 'data': results[6]})

  plot_data(
    data_to_plot,
    x_label,
    y_label,
    config.RESULTS_DIR + filename)
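A plausible plot_data matching the two create_chart snippets above, sketched under the assumption that each entry's 'data' is a one-dimensional series and that the last argument is an output path (not the project's actual implementation):

import matplotlib.pyplot as plt

def plot_data(data_to_plot, x_label, y_label, path):
    fig, ax = plt.subplots()
    for series in data_to_plot:
        ax.plot(series['data'], label=series['label'])  # one curve per model
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    ax.legend()
    fig.savefig(path)
    plt.close(fig)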
Example #14
  # training = outlier.outlier(retrieve.retrieve_data('trip_data_1.csv'), 3)
  # test = outlier.outlier(retrieve.retrieve_data('trip_data_2.csv'), 3)

  ## normalize the  matrix
  # part h - don't normalize train 2 (pickup time) bc of the binary modification
  for count in range(0, 8):
    if count != 2:
      training = util.normalize(training, count)
      test = util.normalize(test, count)

  ## plot the graphs
  # part c
  # axis dictionary, plot
  import plot
  axeslabels = ['Trip Time (sec)', 'Trip Distance (mi)', 'Pickup Time (hours)', 'Distance between Pickup & Dropoff (mi)']
  plot.plot_data(data, axeslabels)

  ## separating independent variables
  # part d & f
  xtrain = training[:,1]
  xtest = test[:,1]
  # part i
  # xtrain = np.delete(training, 0, axis=1)
  # xtest = np.delete(test, 0, axis=1)

  ## separating dependent variable
  ytrain = training[:,0]
  ytest = test[:,0]

  ## calculate linear regression
  w = util.linReg(xtrain, ytrain)
Example #15
#       LINEAR SVM
###############################################################################
#Load data
iris = datasets.load_iris()
X = np.vstack((iris.data[:, 0], iris.data[:, 1])).T
Y = iris.target

#Splitting data
X_temp, X_test, y_temp, y_test = train_test_split(X,
                                                  Y,
                                                  test_size=0.3,
                                                  random_state=70)
X_train, X_validation, y_train, y_validation = train_test_split(
    X_temp, y_temp, test_size=0.14, random_state=66)
#Plotting data
plot_data(X_train, X_test, X_validation, y_train, y_test, y_validation)

# Define useful quantities
accuracy_list = []
C_value = np.array([10**-3, 10**-2, 10**-1, 1, 10, 10**2, 10**3])

#Classification for C in [10^-3,10^3]
classificators = []
#Plotter
my_plt = plotting_grid(fig_r=12, fig_c=11, grid_r=4, grid_c=2)

for j, counter in enumerate(C_value):
    clf = svm.SVC(kernel='linear', C=counter).fit(X_train, y_train)
    accuracy_list.append(clf.score(X_validation, y_validation))
    classificators.append(clf)
    my_plt.plot(X_validation, clf, "Classification for C = {}".format(counter),
Example #16
def compare_depop_models(dirname):
    filename = lambda name: os.path.join(dirname, name)
    
    if not params.ext_depop_models:
        return
    
    print output.div_line
    print "comparing depopulation models"
    
    active_medium = core.create_medium(None)
    pump_system = model.pump.PumpSystem(params.pump_wavelen, params.pump_duration, params.pump_power, params.pump_efficiency)
    data = []
    for depop_model_class in params.ext_depop_models:
        depop_model_label = depop_model_class.descr
        print depop_model_label
        depop_model = core.create_depop_model(active_medium, depop_model_class)
        inv = params.inverter_class(active_medium, pump_system, depop_model)
        inv.invert(params.inversion_rtol, params.inversion_min_count_t)
        depop_rate = np.vectorize(depop_model.rate)(inv.inversion) / active_medium.volume
        data.append((inv.T, inv.inversion, depop_rate, depop_model_class.descr, depop_model_class))
        ref_inversion = inv.inversion[-1]
        
        unitconv.print_result("population inversion [{}]: {}", ("cm^-3",), (ref_inversion,))
        unitconv.print_result("depopulation rate [{}]: {}", ("cm^-3 s^-1",), (depop_rate[-1],))
    
    if params.graphs:
        print output.status_writing
        dirname = os.path.join(dirname, output.models_rel_path)
        dirname = output.init_dir(dirname)
        data.sort(key = lambda x: x[1][-1], reverse=True)
        Ts, inversions, depop_rates, labels, depop_model_classes = zip(*data)
        plot.plot_data(filename("inversions_evo"), "Population Inversion Evolution", (Ts, None, None, output.t_pump_label), (inversions, None, None, output.inversion_abs_label), labels)
        pump_rate = pump_system.effective_pump_rate / active_medium.volume
        abs_rate_ylim = None #(0.0, pump_rate * 1.25)
        non_zero_Ts = [T[1:] for T in Ts]
        non_zero_inversions = [inversion[1:] for inversion in inversions]
        non_zero_rates = [depop_rate[1:] for depop_rate in depop_rates]
        rel_depop_rates = [depop_rate / inversion for depop_rate, inversion in zip(non_zero_rates, non_zero_inversions)]
        plot.plot_data(filename("depop_rates"), "Depopulation Rate", (inversions, None, None, output.inversion_abs_label), (depop_rates, None, abs_rate_ylim, output.rate_label), labels, yvals=[(pump_rate, "pump rate")])
        plot.plot_data(filename("depop_rates_alt"), "Depopulation Rate to Inversion Ratio", (non_zero_inversions, None, None, output.inversion_abs_label), (rel_depop_rates, None, None, output.rate_rel_label), labels)
        plot.plot_data(filename("depop_rates_evo"), "Depopulation Rate Evolution", (Ts, None, None, output.t_pump_label), (depop_rates, None, abs_rate_ylim, output.rate_label), labels, yvals=[(pump_rate, "pump rate")])
        plot.plot_data(filename("depop_rates_alt_evo"), "Depopulation Rate to Inversion Ratio Evolution", (non_zero_Ts, None, None, output.t_pump_label), (rel_depop_rates, None, None, output.rate_rel_label), labels)
        
        if params.ext_alt_depop_model not in depop_model_classes:
            return
        alt_model_idx = depop_model_classes.index(params.ext_alt_depop_model)
        alt_T = Ts[alt_model_idx]
        alt_inversion = inversions[alt_model_idx]
        altinvs = []
        aTs = []
        altinv_inversion_rdiffs = []
        for cls, T, inversion in zip(depop_model_classes, Ts, inversions):
            if cls is params.ext_alt_depop_model:
                continue
            uT = set(list(T) + list(alt_T))
            aT = np.array(sorted(list(uT)))
            altinv = np.interp(aT, alt_T, alt_inversion)[1:]
            inv = np.interp(aT, T, inversion)[1:]
            rdiff = np.fabs(inv - altinv) / np.fmin(inv, altinv)
            aTs.append(aT[1:])
            altinvs.append(altinv)
            altinv_inversion_rdiffs.append(rdiff)
        non_alt_labels = [label for i, label in enumerate(labels) if i != alt_model_idx]
        plot.plot_data(filename("inversions_rdiff_inv"), "Inversion Relative Difference", (altinvs, None, None, output.inversion_abs_label), (altinv_inversion_rdiffs, None, None, output.inversion_rdiff_label), non_alt_labels)
        plot.plot_data(filename("inversions_rdiff_evo"), "Inversion Relative Difference Evolution", (aTs, None, None, output.t_pump_label), (altinv_inversion_rdiffs, None, None, output.inversion_rdiff_label), non_alt_labels)
Example #17
beta_df = file_parameters[testcase][1]
beta = file_parameters[testcase][2]
rho = file_parameters[testcase][3]

# Create folder to store plots and data (if the latter has not been created)
data_folder = './case' + testcase + '_files'
os.mkdir(data_folder)

if (os.path.isfile('../lorenz/data.csv')):

    df = pd.read_csv('../lorenz/data.csv')

    # Get only relevant slice of the full dataset
    df_case = df[(df['Sigma'] == sigma) & (df['Beta'] == beta_df) &
                 (df['Rho'] == rho)]
    pl.plot_data(df_case)

else:  # If data file is not there, calculate only the relevant dataset, store

    # Initial Condition
    x0 = 0.01
    y0 = 0
    z0 = 0

    # Solver parameters
    N = 5000
    t_delta = 0.01
    case = (sigma, beta, rho)
    dataset = ut.generate_dataset(x0, y0, z0, N, t_delta, [case])
    filename = 'data_case' + testcase + '.csv'
    fh.save_data(filename, dataset)
Example #18
def plot_output(dirname, input_beam, input_pulse, fwhm, amp, fluences, exact_density_out=None, exact_population_final=None):
    filename = lambda name: os.path.join(dirname, name)
    
    density = amp.density
    population = amp.population
    upper = population[0]
    lower = population[1]
    inversion = upper - lower
    
    Z = amp.Z
    T = amp.T
    
    if params.output_rel_time:
        T = T / fwhm
    
    TZ, ZT = np.meshgrid(T, Z)
    
    zlim = (Z[0], Z[-1])
    tlim = (T[0], T[-1])
    
    ref_density = input_pulse.ref_density
    ref_inversion = amp.active_medium.initial_inversion.ref_inversion
    
    out_t_label = norm_t_label if params.output_rel_time else t_amp_label
    
    stride_z = max(len(amp.Z) // params.out_count_z, 1)
    stride_t = max(len(amp.T) // params.out_count_t, 1)
    
    plot.plot_data(filename("density_in"), "Input Photon Density", (T, None, tlim, out_t_label), (density[0]/ref_density, None, None, density_rel_label))
    plot.plot_data(filename("density_out"), "Output Photon Density", (T, None, tlim, out_t_label), (density[-1]/ref_density, None, None, density_rel_label))
    plot.plot_data(filename("densities"), "Input and Output Photon Density", ((T, ) * 2, None, tlim, out_t_label), ((density[0]/ref_density, density[-1]/ref_density), None, None, density_rel_label), ("input pulse", "output pulse"))
    plot.plot_data(filename("densities_norm"), "Normalized Input and Output Photon Density", ((T, ) * 2, None, tlim, out_t_label), ((density[0]/ref_density, density[-1]/np.amax(density[-1])), None, None, density_norm_rel_label), ("input pulse", "output pulse"))
    
    plot.plot_data(filename("upper_init"), "Initial Upper State Population", (Z, None, zlim, z_label), (upper.T[0]/ref_inversion, None, None, upper_rel_label))
    plot.plot_data(filename("upper_final"), "Final Upper State Population", (Z, None, zlim, z_label), (upper.T[-1]/ref_inversion, None, None, upper_rel_label))
    plot.plot_data(filename("lower_init"), "Initial Lower State Population", (Z, None, zlim, z_label), (lower.T[0]/ref_inversion, None, None, lower_rel_label))
    plot.plot_data(filename("lower_final"), "Final Lower State Population", (Z, None, zlim, z_label), (lower.T[-1]/ref_inversion, None, None, lower_rel_label))
    plot.plot_data(filename("inversion_init"), "Initial Population Inversion", (Z, None, zlim, z_label), (inversion.T[0]/ref_inversion, None, None, inversion_rel_label))
    plot.plot_data(filename("inversion_final"), "Final Population Inversion", (Z, None, zlim, z_label), (inversion.T[-1]/ref_inversion, None, None, inversion_rel_label))
    
    plot.plot_projection(filename("density_evo"), "Photon Density Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (density/ref_density, None, density_rel_label), (30, -30), (stride_z, stride_t))
    plot.plot_projection(filename("upper_evo"), "Upper State Population Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (upper/ref_inversion, None, upper_rel_label), (30, 30), (stride_z, stride_t))
    plot.plot_projection(filename("lower_evo"), "Lower State Population Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (lower/ref_inversion, None, lower_rel_label), (30, 30), (stride_z, stride_t))
    plot.plot_projection(filename("inversion_evo"), "Population Inversion Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (inversion/ref_inversion, None, inversion_rel_label), (30, 30), (stride_z, stride_t))
    
    if exact_density_out is not None:
        plot.plot_error(filename("density_err"), "Photon Density Relative Error", (T, None, tlim, out_t_label), ((exact_density_out, density[-1]), None, None, error_label))
    if exact_population_final is not None:
        plot.plot_error(filename("inversion_err"), "Population Inversion Relative Error", (Z, None, zlim, z_label), ((exact_population_final[0] - exact_population_final[1], inversion.T[-1]), None, None, error_label))
        if amp.active_medium.doping_agent.lower_lifetime != 0.0:
            plot.plot_error(filename("upper_err"), "Upper State Population Relative Error", (Z, None, zlim, z_label), ((exact_population_final[0], upper.T[-1]), None, None, error_label))
            plot.plot_error(filename("lower_err"), "Lower State Population Relative Error", (Z, None, zlim, z_label), ((exact_population_final[1], lower.T[-1]), None, None, error_label))
    
    norm_fluences = fluences / input_beam.ref_fluence
    plot.plot_data(filename("fluence"), "Fluence Evolution", (Z, None, zlim, z_label), (norm_fluences, None, None, fluence_rel_label))
Example #19
from lr import LogisticRegiression
from util import read_data
from plot import plot_data
import numpy as np

if __name__ == '__main__':
    print('Start')
    # ex2data1
    data1 = read_data('./ex2/ex2data1.txt')
    x = data1[:, 0:2]
    x = x.transpose()
    y = data1[:, 2]
    y = y.transpose()
    # y.shape = (1, 100)
    lr = LogisticRegiression(2)
    lr.lib_train_method(x, y)
    weight = lr.get_weight()
    print('The weight and bias is: {}'.format(weight))
    plot_data(x, y, weight[0], weight[1])
    print('End')
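The plot.plot_data imported above is not listed; a sketch of what it could look like for this call, assuming x has shape (2, n), y holds 0/1 labels, weight[0] is a length-2 weight vector and weight[1] a scalar bias (all assumptions read off the call site):

import numpy as np
import matplotlib.pyplot as plt

def plot_data(x, y, w, b):
    pos, neg = (y == 1), (y == 0)
    plt.scatter(x[0, pos], x[1, pos], marker='+', label='y = 1')
    plt.scatter(x[0, neg], x[1, neg], marker='o', label='y = 0')
    # decision boundary: w[0]*x1 + w[1]*x2 + b = 0
    x1 = np.linspace(x[0].min(), x[0].max(), 100)
    plt.plot(x1, -(w[0] * x1 + b) / w[1], label='decision boundary')
    plt.legend()
    plt.show()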
Example #20
def run_algorithms(algorithms, datasets, metrics, output, conf):
    dts = Datasets()
    shall_plot = conf.get("plot_data")
    if shall_plot:
        plot_dir = conf.get("plot_dir", "../plots")

        tmp_plot_dir = "../plots_1"
        if os.path.exists(tmp_plot_dir):
            shutil.rmtree(tmp_plot_dir)

        os.mkdir(tmp_plot_dir)

        orig_data_dir = os.path.join(tmp_plot_dir, "original")
        os.mkdir(orig_data_dir)
        for dataset in datasets:
            plot_data(os.path.join(orig_data_dir, "%s-orig.png"  % dataset), "%s-orig" % dataset, dataset)

    if output == 'dump_text' and not os.path.exists("../dumps"):
        os.mkdir("../dumps")

    for algorithm in algorithms:

        if shall_plot:
            algo_dir = os.path.join(tmp_plot_dir, algorithm)
            os.mkdir(algo_dir)

        algo_conf = conf["algorithms"].get(algorithm, None)

        if not algo_conf:
            logging.error("Algorithm %s not found in conf file" % algorithm)
            sys.exit(0)

        algo_conf['name'] = algorithm
        learn_class = _get_algorithm_class(algorithm)
        learn = learn_class(**algo_conf)
        learn._set_cross_validation(conf.get("cv_method", None), conf.get("cv_metric", None), conf.get("cv_params", None))
        results = []
        for dataset in datasets:
            if dataset not in conf["datasets"]:
                logging.error("Dataset %s not found" % dataset)
                sys.exit(0)

            cv_dir = None
            if shall_plot:
                dataset_dir = os.path.join(algo_dir, dataset)
                os.mkdir(dataset_dir)

                if algo_conf.get("cross_validate", True):
                    cv_dir = os.path.join(dataset_dir, "cv")
                    os.mkdir(cv_dir)

            training_sizes = conf.get("training_size", [0.40])
            scores = []
            for training_size in training_sizes:
                data = dts.load_dataset(dataset, training_size)

                learn.set_dataset(dataset, training_size*100, cv_dir)
                if learn.check_type(data["type"]):
                    eval_metrics = []
                    if metrics:
                        eval_metrics.extend(metrics)
                    else:
                        eval_metrics.extend(algo_conf["allowed_metrics"])

                    learn.train(data["x_train"], data["y_train"])
                    result_tups = learn.evaluate(data["x_test"], data["y_test"], eval_metrics)

                    print_results(training_size, algorithm, dataset, result_tups)
                    results.append((algorithm, dataset, training_size, result_tups))

                    if shall_plot:
                        decision_plot_path = os.path.join(dataset_dir, "decision-%s_%s_size_%d.png" % (dataset, algorithm, training_size * 100))
                        learn.plot_results(decision_plot_path, dataset, training_size, data['x_train'], data['x_test'], data['y_train'], data['y_test'])

                        for metric, y_test, score in result_tups:
                            metric_plot_path = os.path.join(dataset_dir, "metric-%s-%s_%s_size_%d.png" % (metric, dataset, algorithm, training_size * 100))
                            plot_metric(metric_plot_path, data['type'], y_test, data['y_test'], dataset, algorithm, training_size * 100)
                    scores.append(result_tups[0][2])
            if shall_plot:
                train_plot_path = os.path.join(dataset_dir, "train_vs_acc-%s_%s.png" % (algorithm, dataset))
                plot_training_results(train_plot_path, [train_size * 100 for train_size in training_sizes], scores)

        if output == "pdf":
            generate_pdf(results)
        elif output == "dump_text":
            dump_results(algorithm, results)
    if conf.get("plot_data", False):
        shutil.rmtree(plot_dir)
        shutil.move(tmp_plot_dir, plot_dir)
Example #21
def calculate_hic():
    hic_val = int(hic() * 1000) / 1000.0
    l1['text'] = "HIC: " + str(hic_val)
    plot.plot_data()
Example #22
def main():

    train, test = load_data()

    prior = 1 / 3

    # Num Classes
    c = train.shape[0]
    # Num Dimensions
    d = train[0].shape[1]

    means = np.empty((c, d))
    covs = np.empty(((c, d, d)))

    ###### Part A #######

    for i in range(c):
        means[i] = np.mean(train[i], axis=0)
        covs[i] = np.cov(train[i], rowvar=0)

    predicted = np.array([], dtype=int)

    flat_test, labels_test = flatten_data(test, c)

    disc_values = np.zeros((100, 3))

    ####### Part B ######

    for i, point in enumerate(flat_test):
        for j in range(c):
            m = discriminant(point, means[j], covs[j], d, prior)
            disc_values[i, j] = m

    predicted = np.argmax(disc_values, axis=1)

    ####### Part C #########

    cm, acc = confusion_matrix(labels_test, predicted, c)

    print("Part C Covariance Matrix")
    print(cm)
    print(f"Error = {1 - acc}")

    ####### Part D ##########

    flat_train, labels_train = flatten_data(train, c)

    plot_data(flat_train.T[0], flat_train.T[1], labels_train, c)
    plot_data(flat_test.T[0], flat_test.T[1], labels_test, c)

    r_train, theta_train = cart2pol(flat_train)

    r_test, theta_test = cart2pol(flat_test)

    plot_data(r_train, theta_train, labels_train, c)
    plot_data(r_test, theta_test, labels_test, c)

    means = np.empty(c)
    covs = np.empty(c)
    posterior = np.zeros(c)

    disc_values = np.empty((r_test.shape[0], c))

    for i in range(c):
        means[i], covs[i] = mu_estimate(r_train[labels_train == i], 0, 100,
                                        .25)

    for i, pt in enumerate(r_test):
        for j in range(c):
            disc_values[i, j] = discriminant(pt, means[j], covs[j], d, prior)

    predicted = np.argmax(disc_values, axis=1)

    cm, acc = confusion_matrix(labels_test, predicted, c)

    print("Part D Covariance Matrix")
    print(cm)
    print(f"Error = {1 - acc}")
Example #23
def test_plot():
    fig = plot.plot_data('./figures/data.dat')
    assert isinstance(fig, Figure)
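The test above only requires plot.plot_data to return a matplotlib Figure; a minimal sketch consistent with it, assuming the data file holds two whitespace-separated columns:

import numpy as np
import matplotlib.pyplot as plt

def plot_data(path):
    x, y = np.loadtxt(path, unpack=True)  # assumed two-column layout
    fig, ax = plt.subplots()
    ax.plot(x, y)
    return fig  # the test asserts isinstance(fig, Figure)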
Example #24
 def get(self, request, *args, **kwargs):
     tags = StackOverflowTagsInfo.objects.all()
     if tags:
         div1, div2 = plot_data(tags)
         return render(request, 'index.html', {'div1': div1, 'div2': div2})
     return render(request, 'index.html')
Example #25
i=0;
data_array=np.array([i*stepwidth,C_0])
A=True

if euler==1:
  while(A):
    i+=1 
    C_1=C_0*(1-stepwidth*L)
    C_0=C_1
    
    data_array=np.vstack((data_array,[i*stepwidth,C_0]))
    output.write(str(i*stepwidth)+"\t"+str(C_0)+"\n")
    
    if i==iterations:
      A=False
else:
  while(A):
    i+=1 
    C_1=C_0*(1-stepwidth*L*0.5)/(1+stepwidth*L*0.5)
    
    C_0=C_1
    
    data_array=np.vstack((data_array,[i*stepwidth,C_0]))
    output.write(str(i*stepwidth)+"\t"+str(C_0)+"\n")
    
    if i==iterations:
      A=False   
output.close()  
plot.plot_data(data_array,HOME+filename)
#plot.plot_data(HOME,filename) print it from file
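A sketch of the plot.plot_data(data_array, path) used above, assuming column 0 is the time axis and column 1 the decaying concentration (hypothetical helper, not the project's code):

import matplotlib.pyplot as plt

def plot_data(data_array, path):
    fig, ax = plt.subplots()
    ax.plot(data_array[:, 0], data_array[:, 1])
    ax.set_xlabel("t")
    ax.set_ylabel("C")
    fig.savefig(path)
    plt.close(fig)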
Example #26
from gradientDe import gradientDescent
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
## ==================== Part 1: Basic Function ====================
#Complete warmUpExercise.m
print('Running warmUpExercise ... ')
print('5x5 Identity Matrix: ')
warmUp.warmUpExercise()

#======================= Part 2: Plotting =======================
print('Plotting Data ...')
data = np.loadtxt('ex1data1.txt', delimiter=',')
X = data[:, 0]
y = data[:, 1]
plt.figure(0)
plot_data(X, y)

#=================== Part 3: Cost and Gradient descent ===================
m = np.size(y)
X = np.column_stack((np.ones(m), X))  # % Add a column of ones to x
theta = np.zeros((2, 1))  # % initialize fitting parameters
y = y.reshape(m, 1)

#% Some gradient descent settings
iterations = 1500
alpha = 0.01

print('Testing the cost function ...')
#% compute and display initial cost
J = cost(X, y, theta)
print('With theta = [0 ; 0], Cost computed = {:.2f}'.format(J))
Example #27
        action = None

        for line in fileinput.input():
            if line.rstrip() not in "12345" or len(line.rstrip()) != 1:
                print('Invalid input, try again!!!')
            else:
                action = line.rstrip()
                fileinput.close()

        if action == '1':

            build_train_data()
            build_comparison_data()

        elif action == '2':

            plot_data(filename)

        elif action == '3':

            stock_prediction_LSTM(filename, True)

        elif action == '4':

            stock_prediction_LSTM(filename, False)

        elif action == '5':

            break

Example #29
    data = read_data('./ex1/ex1data1.txt')
    linear = LinearRegression(1, 1, 0.01)
    x = np.mat(data[:,0])
    y = np.mat(data[:,1])
    print('Start training linear regression model with one variable')
    linear.train(x, y, 1000,  lr=0.01)
    print('End training linear regression model with one variable')
    x_list = x.tolist()[0]
    x_list.sort()
    x_sorted = np.mat([x_list])
    y_pred = linear.predict(x_sorted)
    print('End linear regression with one variable')
    # multiple variable regression
    print('Start linear regression with multiple variables')
    data_multiple = read_data('./ex1/ex1data2.txt')
    # normalize data
    data_multiple = normalize(data_multiple)
    linear2 = LinearRegression(2, 1, 0.01)
    x2 = np.mat(data_multiple[:,0:2])
    y2 = np.mat(data_multiple[:, 2])
    lr_list = [0.0001, 0.001, 0.01, 1/np.exp(1)]
    loss = []
    for lr in lr_list:
        print('Start linear regression with multiple variables train when learning rate={}'.format(lr))
        loss.append(linear2.train(x2.transpose(), y2, lr=lr, max_turns=50))
        print('End linear regression with multiple variables train when learning rate={}'.format(lr))
    print('End linear regression with multiple variables')
    # Plot data
    plot_data(x, y, x_sorted, y_pred, lr_list, loss)
    print('End')
Example #30
from plot import plot_data
import pandas as pd

data = pd.read_csv('data.csv')

plot_data(data)
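A hypothetical plot_data for the call above, assuming it simply draws every column of the DataFrame against its index (sketch only):

import matplotlib.pyplot as plt

def plot_data(data):
    fig, ax = plt.subplots()
    data.plot(ax=ax)  # one line per column
    ax.set_xlabel(data.index.name or "index")
    plt.show()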
Example #31
def main(argv):
    tf.reset_default_graph()

    g = tf.Graph()
    with g.as_default():
        # check if the image folders exist. If the image folders do not exist this call will unpack the .tar.gz files
        ImageData.check_and_uncompress_images()

        # read all the images from the image folder, and get the image names and labels
        image_names, image_labels = ImageData.image_names_and_labels()

        # images that are read from the folders need to be stratified.
        # shuffle and split the data into 90%-10% train/test datasets.
        # stratify will make sure that the classes are distributed equally among these two sets
        X_train, X_test, y_train, y_test = ImageData.stratify(image_names,
                                                              image_labels,
                                                              test_size=0.1,
                                                              shuffle=True)

        train_count = len(y_train)
        test_count = len(y_test)
        print("Train Image Count = {}\nTest Image Count = {}".format(
            train_count, test_count))

        # saving the reference to the test images for later (to be used in eval.py)!
        ImageData.save_as_csv(X_test, y_test, ['image_name', 'label'], LOG_DIR,
                              'test_data.csv')

        number_of_batches = int(ceil(len(y_train) / batch_size))
        print("{} batches in each epoch\n".format(number_of_batches))

        # training data pipeline and the iterator
        train_images = X_train
        train_labels = tf.cast(y_train, tf.int32)

        # when running using a GPU, set prefetch_to_device=True
        train_iterator = ImageData.train_dataset_input_fn(
            train_labels,
            train_images,
            batch_size,
            num_epochs,
            prefetch_to_device=False)
        train_next_batch = train_iterator.get_next()

        # test data pipeline and the iterator
        test_images = X_test
        test_labels = tf.cast(y_test, tf.int32)

        # when running using a GPU, set prefetch_to_device=True
        test_iterator = ImageData.test_dataset_input_fn(
            test_labels, test_images, batch_size, prefetch_to_device=False)
        test_next_batch = test_iterator.get_next()

        # embedding data pipeline and the iterator
        # we have selected 1/4th of the Test Data, 311 rows, due to the memory limitations of our GPU
        embedding_iterator = ImageData.test_dataset_input_fn(
            test_labels, test_images, 311, prefetch_to_device=False)
        embedding_next_batch = embedding_iterator.get_next()

        # placeholders
        features = tf.placeholder(tf.float32, shape=(None, 160, 120, 3))
        labels = tf.placeholder(tf.int32, shape=(None, ))

        # model
        logits = cnn_model_fn(features, is_training=True)

        # embedding (for Projector visualization in tensorboard)
        embedding = tf.Variable(np.zeros([311, logits.shape[1]]),
                                dtype=tf.float32,
                                name='test_embedding')
        assignment = embedding.assign(logits)

        metadata = 'metadata.tsv'
        config = projector.ProjectorConfig()
        embedding_config = config.embeddings.add()
        embedding_config.tensor_name = embedding.name
        embedding_config.metadata_path = metadata

        # loss
        with tf.name_scope('loss'):
            with tf.name_scope('cross_entropy'):
                loss = loss_fn(labels, logits)
            tf.summary.scalar('loss', loss)
            tf.summary.histogram("loss", loss)

        # optimizer
        with tf.name_scope('optimizer'):
            lr = 0.1
            step_rate = 1000
            decay = 0.95

            global_step = tf.Variable(0, trainable=False)
            tf.assign(global_step, global_step + 1)

            learning_rate = tf.train.exponential_decay(lr,
                                                       global_step,
                                                       step_rate,
                                                       decay,
                                                       staircase=True)
            tf.summary.scalar('learning_rate', learning_rate)

            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                               epsilon=0.01)

            train_op = optimizer.minimize(loss=loss, global_step=global_step)

        # predictions
        with tf.name_scope('predictions'):
            predictions = tf.cast(tf.argmax(input=logits, axis=1), tf.int32)

        # model performance metric
        with tf.name_scope('performance'):
            with tf.name_scope('correct_prediction'):
                correct_prediction = tf.equal(predictions, labels)
            with tf.name_scope('accuracy'):
                accuracy = accuracy_fn(correct_prediction)
            tf.summary.scalar('accuracy', accuracy)
            tf.summary.histogram("accuracy", accuracy)

        saver = tf.train.Saver()

        init = tf.global_variables_initializer()

        start_time = time.time()
        with tf.Session(graph=g) as sess:

            sess.graph.as_graph_def()

            # log training values separately from the test values
            train_writer = tf.summary.FileWriter(
                os.path.join(LOG_DIR, 'train'), sess.graph)
            test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'),
                                                sess.graph)

            # saves the config file that TensorBoard will read during startup.
            projector.visualize_embeddings(test_writer, config)

            # merge all summaries so that they can be written to the model file
            merged_summaries = tf.summary.merge_all()

            # initializations
            sess.run(init)
            sess.run(train_iterator.initializer)
            sess.run(test_iterator.initializer)
            sess.run(embedding_iterator.initializer)

            iter = 0
            epoch = 1
            l = list()
            # iterate until the batch iterator finishes all the batches and epochs
            while True:
                try:
                    # get the next batch of training data...
                    train_next_features, train_next_labels = sess.run(
                        train_next_batch)
                    feed_dict_train = {
                        features: train_next_features,
                        labels: train_next_labels
                    }

                    # ...and train the model
                    with tf.name_scope('training'):
                        loss_train, _ = sess.run([loss, train_op],
                                                 feed_dict=feed_dict_train)
                        l.append(loss_train)

                    # at every 20 iterations calculate the accuracy and log the summaries for both train and test
                    if iter % 20 == 0:
                        with tf.name_scope('evaluation'):
                            with tf.name_scope('train'):
                                train_batch_predictions, summaries, accuracy_train = sess.run(
                                    [predictions, merged_summaries, accuracy],
                                    feed_dict=feed_dict_train)

                                train_writer.add_summary(summaries,
                                                         global_step=iter)

                            test_next_features, test_next_labels = sess.run(
                                test_next_batch)
                            feed_dict_test = {
                                features: test_next_features,
                                labels: test_next_labels
                            }
                            with tf.name_scope('test'):
                                # get the next test batch which the model hasn't seen!
                                test_batch_predictions, summaries, accuracy_test = sess.run(
                                    [predictions, merged_summaries, accuracy],
                                    feed_dict=feed_dict_test)

                                test_writer.add_summary(summaries,
                                                        global_step=iter)

                                elapsed_time = time.time() - start_time
                                start_time = time.time()

                                print(
                                    "iter={:4d}, TRAIN loss={:5.2f}, acc={:5.2f}, TEST acc={:5.2f}, time={:5.2f}"
                                    .format(iter, loss_train, accuracy_train,
                                            accuracy_test, elapsed_time))

                        save_path = saver.save(
                            sess, os.path.join(LOG_DIR, 'model.ckpt'), iter)

                    if iter > 0 and iter % number_of_batches == 0:
                        print("Epoch {} completed".format(epoch))
                        epoch += 1

                    iter += 1
                except tf.errors.OutOfRangeError:
                    # this is the end of all epochs!
                    print("Last Epoch {} completed".format(epoch))

                    # calculate the final test accuracy and model summaries and save it
                    test_next_features, test_next_labels = sess.run(
                        test_next_batch)
                    feed_dict_test = {
                        features: test_next_features,
                        labels: test_next_labels
                    }

                    test_batch_predictions, summaries, accuracy_test = sess.run(
                        [predictions, merged_summaries, accuracy],
                        feed_dict=feed_dict_test)

                    test_writer.add_summary(summaries, global_step=iter)

                    elapsed_time = time.time() - start_time
                    start_time = time.time()

                    print("iter={:4d}, FINAL TEST acc={:5.2f}, time={:5.2f}".
                          format(iter, accuracy_test, elapsed_time))

                    # calculate the embedding for Projector visualization in Tensorboard
                    print("Processing Embeddings")
                    embed_next_features, embed_next_labels = sess.run(
                        embedding_next_batch)
                    feed_dict_embed = {
                        features: embed_next_features,
                        labels: embed_next_labels
                    }

                    # and write the labels that match the input data to the metadata file
                    with open(os.path.join(LOG_DIR, 'test', metadata),
                              'w') as metadata_file:
                        for row in embed_next_labels:
                            metadata_file.write('%d\n' % row)

                    sess.run(assignment, feed_dict=feed_dict_embed)

                    elapsed_time = time.time() - start_time

                    print("Processing time={:5.2f}".format(elapsed_time))

                    # save the final checkpoint!
                    save_path = saver.save(sess,
                                           os.path.join(LOG_DIR, 'model.ckpt'),
                                           iter)
                    break

            # goodbye!
            print("done!")

            # plot the accumulated loss values over the course of iterations
            plot_data(np.arange(0, len(l), 1, dtype=int), l, num_epochs)
Example #32
        T = amp.T
        if params.output_rel_time:
            T = T / params.pulse_duration
        out_t_label = output.norm_t_label if params.output_rel_time else output.t_amp_label
        tlim = (T[0], T[-1])
        Ts = (T,) * 3
        ref_density = ref_pulse.ref_density
        densities = (density_out_4 / ref_density, density_out / ref_density, density_out_3 / ref_density)
        lifetime_scale = unitconv.units[output.lower_lifetime_unit]
        lsl_graph_label_fmt = (r"%g \, %s" % (lower_lifetime/lifetime_scale, output.lower_lifetime_unit))
        if lower_lifetime == 0.0:
            lsl_graph_label_fmt = "0"
        elif math.isinf(lower_lifetime):
            lsl_graph_label_fmt = "\\infty"
        labels = (output.lower_lifetime_legend % "0", output.lower_lifetime_legend % lsl_graph_label_fmt, output.lower_lifetime_legend % "\\infty")
        plot.plot_data(filename("lsl_effects"), "Effects of Lower State Lifetime", (Ts, None, tlim, out_t_label), (densities, None, None, output.density_rel_label), labels)

def select_methods(perform_opt_pump, perform_opt_geom, (int_types, amp_types), inversions_pump, inversions_geom):
    print output.div_line
    print "determining extended mode method combinations"
    
    (num_types_pump, counts_pump), (num_types_geom, counts_geom) = (None, None), (None, None)
    
    if perform_opt_pump:
        print "pumping"
        max_inversion_pump = inversions_pump[-1, -1]
        (num_types_pump, counts_pump), _ = core.select_methods((int_types, amp_types), max_inversion_pump, quiet=True)
    
    if perform_opt_geom:
        print "geometry"
        max_medium_radius = params.ext_opt_geom_mediumradius[1]
Example #33
def train_network(data_dict, agent_params, training_params):
    device = agent_params['network_params']['gpu1']
    net = training_params['network'](agent_params['network_params'])
    if training_params['resume']:
        load_weights(net)
    count_parameters(net)
    # if torch.cuda.device_count() > 1:
    #     dist.init_process_group("gloo", rank=rank, world_size=world_size)
    #     net = DDP(net)
    net.to(device)
    if 'category_weights' in data_dict:
        criterion = training_params['criterion'](
            data_dict['category_weights'].to(device))
    else:
        criterion = training_params['criterion']()
    optimizer = optim.Adam(net.parameters(), lr=0.003)
    lr_stepsize = training_params['epochs'] // 5
    lr_stepper = MultiStepLR(
        optimizer=optimizer,
        milestones=[lr_stepsize * 2, lr_stepsize * 3, lr_stepsize * 4],
        gamma=0.1)
    scores = []
    val_scores = []
    score_window = deque(maxlen=100)
    val_window = deque(maxlen=100)
    for epoch in range(training_params['epochs']):
        losses = []
        for i, data in enumerate(data_dict['trainloader'], 1):
            sys.stdout.write('\r')
            # get the inputs; data is a list of [inputs, targets]
            inputs, targets = data.values()
            targets = targets.cuda() if torch.cuda.is_available() else targets
            # zero the parameter gradients
            optimizer.zero_grad()
            # unspool hand into 60,5 combos
            if training_params['five_card_conversion'] == True:
                inputs = unspool(inputs)
            if training_params['one_hot'] == True:
                inputs = torch.nn.functional.one_hot(inputs)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            sys.stdout.write(
                "[%-60s] %d%%" %
                ('=' * (60 * (i + 1) // len(data_dict['trainloader'])),
                 (100 * (i + 1) // len(data_dict['trainloader']))))
            sys.stdout.flush()
            sys.stdout.write(f", training sample {(i+1):.2f}")
            sys.stdout.flush()
        print('outputs', outputs.shape)
        print(
            f'\nMaximum value {torch.max(torch.softmax(outputs,dim=-1),dim=-1)[0]}, Location {torch.argmax(torch.softmax(outputs,dim=-1),dim=-1)}'
        )
        print('targets', targets[:100])
        lr_stepper.step()
        score_window.append(loss.item())
        scores.append(np.mean(score_window))
        net.eval()
        val_losses = []
        for i, data in enumerate(data_dict['valloader'], 1):
            sys.stdout.write('\r')
            inputs, targets = data.values()
            targets = targets.cuda() if torch.cuda.is_available() else targets
            if training_params['five_card_conversion'] == True:
                inputs = unspool(inputs)
            if training_params['one_hot'] == True:
                inputs = torch.nn.functional.one_hot(inputs)
            val_preds = net(inputs)
            val_loss = criterion(val_preds, targets)
            val_losses.append(val_loss.item())
            sys.stdout.write("[%-60s] %d%%" %
                             ('=' * (60 *
                                     (i + 1) // len(data_dict['valloader'])),
                              (100 * (i + 1) // len(data_dict['valloader']))))
            sys.stdout.flush()
            sys.stdout.write(f", validation sample {(i+1):.2f}")
            sys.stdout.flush()
            if i == 100:
                break
        print('\nguesses', torch.argmax(val_preds, dim=-1)[:100])
        print('targets', targets[:100])
        val_window.append(sum(val_losses))
        val_scores.append(np.mean(val_window))
        net.train()
        print(
            f"\nTraining loss {np.mean(score_window):.4f}, Val loss {np.mean(val_window):.4f}, Epoch {epoch}"
        )
        torch.save(net.state_dict(), training_params['save_path'])
    print('')
    # Save graphs
    loss_data = [scores, val_scores]
    loss_labels = ['Training_loss', 'Validation_loss']
    plot_data(f'{network.__name__}_Handtype_categorization', loss_data,
              loss_labels)
    # check each hand type
    if 'y_handtype_indexes' in data_dict:
        net.eval()
        for handtype in data_dict['y_handtype_indexes'].keys():
            mask = data_dict['y_handtype_indexes'][handtype]
            inputs = data_dict['valX'][mask]
            if training_params['five_card_conversion'] == True:
                inputs = unspool(inputs)
            if training_params['one_hot'] == True:
                inputs = torch.nn.functional.one_hot(inputs)
            if inputs.size(0) > 0:
                val_preds = net(inputs)
                val_loss = criterion(val_preds, data_dict['valY'][mask])
                print(
                    f'test performance on {training_params["labels"][handtype]}: {val_loss}'
                )
        net.train()
Example #34
def compute_inversion_geom_dependence(task_pool, dirname):
    filename = lambda name: os.path.join(dirname, name)
    
    print output.div_line
    print "computing inversion dependence on geometry parameters"
    
    pump_system = model.pump.PumpSystem(params.pump_wavelen, params.pump_duration, params.pump_power, params.pump_efficiency)
    
    min_medium_radius = params.ext_opt_geom_mediumradius[0]
    
    count_rm = params.ext_opt_geom_resolution[0]
    
    Rm = np.linspace(min_medium_radius, params.ext_opt_geom_mediumradius[1], count_rm)
    
    depop_model_class1 = params.depop_model_class
    depop_model_class2 = params.ext_alt_depop_model
    
    inversions1, inversions2, gains1, gains2, stored_energies1, stored_energies2, inversion_rdiffs, gain_rdiffs = task_pool.parallel_task(_inversion_geom_dependence_task, (Rm,), (), (pump_system, (depop_model_class1, depop_model_class2)))
    
    output.show_status((count_rm, None), params.extended_status_strides, True)
    
    if params.graphs:
        print output.status_writing
        dirname = os.path.join(dirname, output.opt_geom_rel_path)
        dirname = output.init_dir(dirname)
        inversion_rdiff_max = params.ext_opt_inversion_rdiff_max
        pump_energy = params.pump_duration * params.pump_power
        energy_ylim = (0.0, pump_energy * 1.25)
        labels = [cls.descr for cls in [depop_model_class1, depop_model_class2]]
        plot.plot_data(filename("inversion"), "Inversion (%s)" % depop_model_class1.descr, (Rm, None, None, output.medium_radius_label), (inversions1, None, None, output.inversion_abs_label))
        plot.plot_data(filename("inversion_alt"), "Inversion (%s)" % depop_model_class2.descr, (Rm, None, None, output.medium_radius_label), (inversions2, None, None, output.inversion_abs_label))
        plot.plot_data(filename("inversions"), "Population Inversion", ([Rm]*2, None, None, output.medium_radius_label), ([inversions1, inversions2], None, None, output.inversion_abs_label), legend=labels)
        plot.plot_data(filename("ss_gain"), "Small Signal Gain (%s)" % depop_model_class1.descr, (Rm, None, None, output.medium_radius_label), (gains1, None, None, output.gain_label))
        plot.plot_data(filename("ss_gain_alt"), "Small Signal Gain (%s)" % depop_model_class2.descr, (Rm, None, None, output.medium_radius_label), (gains2, None, None, output.gain_label))
        plot.plot_data(filename("ss_gains"), "Small Signal Gain", ([Rm]*2, None, None, output.medium_radius_label), ([gains1, gains2], None, None, output.gain_label), legend=labels)
        plot.plot_data(filename("energy_stored"), "Stored Energy (%s)" % depop_model_class1.descr, (Rm, None, None, output.medium_radius_label), (stored_energies1, None, energy_ylim, output.energy_abs_stored_label), yvals=[(pump_energy, "pump energy")])
        plot.plot_data(filename("energy_stored_alt"), "Stored Energy (%s)" % depop_model_class2.descr, (Rm, None, None, output.medium_radius_label), (stored_energies2, None, energy_ylim, output.energy_abs_stored_label), yvals=[(pump_energy, "pump energy")])
        plot.plot_data(filename("energies_stored"), "Stored Energy", ([Rm]*2, None, None, output.medium_radius_label), ([stored_energies1, stored_energies2], None, energy_ylim, output.energy_abs_stored_label), legend=labels, yvals=[(pump_energy, "pump energy")])
        plot.plot_data(filename("inversion_rdiff"), "Inversion Relative Difference", (Rm, None, None, output.medium_radius_label), (inversion_rdiffs, None, None, output.inversion_rdiff_label), yvals=[(inversion_rdiff_max, None)])
        plot.plot_data(filename("ss_gain_rdiff"), "Small Signal Gain Relative Difference", (Rm, None, None, output.medium_radius_label), (gain_rdiffs, None, None, output.gain_rdiff_label), yvals=[(inversion_rdiff_max, None)])
    
    return inversions1, inversion_rdiffs
Example #35
def main():
    """Main
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--wav", default="LDC93S1.wav")
    parser.add_argument("--winstep", type=float, default=0.01)
    parser.add_argument("--winlen", type=float, default=0.025)
    parser.add_argument("--debug", type=bool, default=True)

    args = parser.parse_args()
    complex_spec, complex_spec_time_scaled = get_complex_spec(
        args.wav, args.winstep, args.winlen, with_time_scaled=True)

    if args.debug:
        mag_spec = get_mag_spec(complex_spec)
        phase_spec = get_phase_spec(complex_spec)
        mag_spec_time_scaled = get_mag_spec(complex_spec_time_scaled)
        phase_spec_time_scaled = get_phase_spec(complex_spec_time_scaled)

        plot_data(mag_spec, "mag.png", "mag")
        plot_data(phase_spec, "orig_phase.png", "phase")
        plot_data(mag_spec_time_scaled, "mag_spec_time_scaled.png",
                  "mag_spec_time_scaled")
        plot_data(phase_spec_time_scaled, "phase_spec_time_scaled.png",
                  "phase_spec_time_scaled")

    modgdf = get_modgdf(complex_spec, complex_spec_time_scaled)
    plot_data(modgdf, "modgdf.png", "modgdf")
    plot_data(np.absolute(modgdf), "abs_modgdf.png", "abs_modgdf")
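The plot_data used throughout this example takes (array, filename, title); a sketch of an implementation consistent with that call pattern (assumed, not the project's actual code):

import matplotlib.pyplot as plt

def plot_data(spec, filename, title):
    fig, ax = plt.subplots()
    ax.imshow(spec.T, origin='lower', aspect='auto')  # frames on x, bins on y
    ax.set_title(title)
    fig.savefig(filename)
    plt.close(fig)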
Example #36
 def plot_data(self, x, y, title):
     plot_data(x=x, y=y, title=title)
Example #37
#reading model parameters from configuration file
params = read_params(args.params)

#building the road
road = Road(**params)
road.make_cells()

#reading bottleneks
bns = read_bottlenecks(road, args.bottlenecks)

#simulation with the chosen bottlenecks
road.simulation(bns)


#saving output
if args.filename:
    name = args.filename
else:
    date = datetime.now().strftime("%d-%b-%Y_%H:%M:%S")
    name = 'density_plot_'+date

#directory with output and configuration
directory_name ='output/'+name
mkdir(directory_name)

plot_data(road, directory_name+'/plot.png')

copyfile(args.params, directory_name+'/configuration.csv')
copyfile(args.bottlenecks, directory_name+'/bottlenecks.csv')
Example #38
    labels = data.get_windows_per_label()

    exp, user, start, stop = labels[1][0]

    acc_file, gyro_file = data.get_raw_data_files(exp, user)

    acc_data = data.get_data(acc_file)[start:stop]
    # gyro_data = data.get_data(gyro_file, start, stop)

    plt.figure(facecolor="white", figsize=(15, 7))

    original = preprocess([], acc_data)

    plt.subplot(221)
    plot.plot_data(original, 'Acc')

    plt.subplot(222)
    plot.plot_freq_spec(original, 'Acc')

    # Standardize using global means
    preprocessed = preprocess([hann, medfilt], acc_data)
    plt.subplot(223)
    plot.plot_data(preprocessed, 'Acc_pre')

    plt.subplot(224)
    plot.plot_freq_spec(preprocessed, 'Acc_pre')

    # plot_data(gyro_data, 'Gyro')
    plt.show()
Example #39
from fetch import retrieve_axises
from plot import plot_data


if __name__ == "__main__":
    import argparse
    import pycountry

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--country', type=str, help='report by country')
    group.add_argument('--code', type=str, help='report by country code')

    args = parser.parse_args()

    if args.code is None:
        country = args.country.capitalize()
        code = pycountry.countries.get(name=country).alpha_2
    else:
        code = args.code
        country = pycountry.countries.get(alpha_2=args.code).name

    data = retrieve_axises(code)

    print(f"Analysing {country} data")
    plot_data(country, data)
Example #40
    P[i]=P_m[j-1,i]+dt*(D_P*(P_m[j-1,last_i]-2.*P_m[j-1,i]+P_m[j-1,first_i])/(h*h))
    i+=1
  #synchronous update & check mass
  i=0  
  for x in lattice_X :  
    A_m[j,i]=A[i]
    mass_A[j]+=h*A[i]
    P_m[j,i]=P[i]
    mass_P[j]+=h*P[i]
    i+=1
  if j==len(lattice_T)-1:
    break

savetxt(HOME+filename_T, lattice_T[output_rows])
savetxt(HOME+filename_X, lattice_X)
savetxt(HOME+filename1, A_m[output_rows,:],fmt='%1.6f')
savetxt(HOME+filename2, P_m[output_rows,:],fmt='%1.6f')
savetxt(HOME+"mass_A.dat", mass_A[output_rows],fmt='%1.6f')
savetxt(HOME+"mass_P.dat", mass_P[output_rows],fmt='%1.6f')
#plot simulation_data
plot.plot_data("mass_A",HOME,filename_T,"mass_A.dat")
plot.plot_data("mass_P",HOME,filename_T,"mass_P.dat")
plot.plot_data_3("A_25",HOME,filename_X,filename1,25,lattice_T,D_A)
plot.plot_data_3("A_50",HOME,filename_X,filename1,50,lattice_T,D_A)
plot.plot_data_3("A_75",HOME,filename_X,filename1,75,lattice_T,D_A)
plot.plot_map_c("A","gist_heat",HOME,filename_X,filename_T,filename1)
#plot.plot_map_c("P","bone",HOME,filename_X,filename_T,filename2)
plot.fit_gauss(HOME,filename_X,filename_T,filename1,D_A)
#plot.fit_gauss("P",HOME,filename_X,filename_T,filename2)