def one_instance(**kwargs):
    if kwargs:
        c_l = kwargs['c_l']
        g_r = kwargs['g_r']
        o_s = kwargs['o_s']
        delta_t, sim_duration, dspt_times, \
        stop_locs, demand_rates, board_rates, stop_num, demand_start_times, \
        link_mean_speeds, link_cv_speeds, link_lengths, link_start_locs, \
        cycle_lengths, green_ratios, signal_offsets, signal_locs, examined_signal \
        = get_parameters(cycle_length=c_l, green_ratio=g_r, off_set=o_s)
    else:
        delta_t, sim_duration, dspt_times, \
        stop_locs, demand_rates, board_rates, stop_num, demand_start_times, \
        link_mean_speeds, link_cv_speeds, link_lengths, link_start_locs, \
        cycle_lengths, green_ratios, signal_offsets, signal_locs, examined_signal \
        = get_parameters()

    for stop in range(stop_num):
        total_hdws_dict[stop] = []

    processes = []
    for i in range(process_num):
        process = Process(target=get_result,
                          args=(batch_no, delta_t, sim_duration, dspt_times,
                                stop_locs, demand_rates, board_rates, stop_num, demand_start_times,
                                link_mean_speeds, link_cv_speeds, link_lengths, link_start_locs,
                                cycle_lengths, green_ratios, signal_offsets, signal_locs))
        process.start()
        processes.append(process)

    for process in processes:
        process.join()

    arr_means = []
    arr_stds = []
    for stop in range(stop_num):
        arr_mean, arr_std = get_mean_std(total_hdws_dict[stop])
        arr_stds.append(arr_std)
        arr_means.append(arr_mean)
    return arr_means, arr_stds
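Note: total_hdws_dict and process_num come from module scope outside this snippet. If total_hdws_dict is a plain dict, the Process workers cannot write their results back into the parent's copy. A minimal sketch of one way to share it via multiprocessing.Manager — run_workers, worker and worker_args are illustrative names, not part of the original code:

from multiprocessing import Manager, Process

def run_workers(process_num, stop_num, worker, worker_args):
    # A Manager-backed dict (and lists) can be mutated from child processes,
    # unlike a plain module-level dict.
    manager = Manager()
    shared_hdws = manager.dict({stop: manager.list() for stop in range(stop_num)})

    processes = [Process(target=worker, args=(shared_hdws,) + tuple(worker_args))
                 for _ in range(process_num)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    # Convert the proxies back to plain lists for downstream statistics.
    return {stop: list(hdws) for stop, hdws in shared_hdws.items()}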
Example #2
def kde_startup():

    try:

        project_path, sessions_path = path.project_path(), path.sessions_path()
        sessions_dir = os.path.join(project_path + sessions_path)

        id = []

        with open(sessions_dir, 'rt') as sessions_r:
            active = csv.reader(sessions_r, delimiter=',')
            for _id in active:
                if session['username'] in _id[0]:
                    id.append(session['username'])

        if session['username'] in id:

            params = par_fn.get_parameters()

            years, months, codes, kernels, bandwidths, metrics = params[0],\
                                                                 params[1],\
                                                                 params[2],\
                                                                 params[3],\
                                                                 params[4],\
                                                                 params[5]

            months, codes, metrics = [month for month in months.keys()],\
                                     [code for code in codes.keys()],\
                                     [metric for metric in metrics.keys()]

            kde = fn.set_kde_startup()
            kde_script, kde_div = components(kde)

            session_status = 'App ready.'

            return render_template('mkecs-kde.html',
                                   kde_script=kde_script,
                                   kde_div=kde_div,
                                   months=months,
                                   years=years,
                                   codes=codes,
                                   kernels=kernels,
                                   bandwidths=bandwidths,
                                   metrics=metrics,
                                   session_status=session_status,
                                   session_id=session['username'])
    except:

        return redirect(url_for('mkecs_kde_session'))
Example #3
    def get_confusion_matrix_and_accuracy(self, fold_num):
        """Método que calcula a matriz de confusao e a acuracia por classe"""
        letters = u.get_classes_letters_list(self.part_2)

        # fetch the images from the dataset's test directory;
        # the test routine is called for each image so that the hits
        # can be added to the confusion matrix afterwards
        parameters_test = p.get_parameters(self.descriptor, self.part_2, self.l1_neurons)
        dataset_validation = u.get_dataset_list(
            u.get_classes_list(parameters_test['testpath']), parameters_test['testpath'])

        for dataset in dataset_validation:
            self.num_tests_images_per_letter = len(dataset)
            for image_i, image in enumerate(dataset):
                self.testing(image, image_i, dataset_tests=True)

        # confusion matrix
        obtained = pd.Series(self.test_results, name='   Obtido')
        predicted = pd.Series(self.test_predicted, name='Esperado')
        confusion_matrix = pd.crosstab(obtained, predicted)
        print(confusion_matrix)

        # plot the confusion matrix using the seaborn and pyplot libraries
        df_cm = pd.DataFrame(confusion_matrix, index=[i for i in letters],
                  columns=[i for i in letters])
        if self.part_2:
            plt.figure(figsize=(17, 10))
        else:
            plt.figure(figsize=(10, 7))
        sn.heatmap(df_cm, annot=True, cmap='PuBu', fmt='g')
        plt.ylabel('Obtido')
        plt.xlabel('Esperado')

        file_command = '{output}confusion_matrix-{fold}.jpg'.format(fold=fold_num,
            output=self.output_directory)

        plt.savefig(file_command)
        plt.close()

        # accuracy
        accuracy = accuracy_score(obtained, predicted)
        print("\nAcurácia média: {} \n".format(accuracy))

        # per-letter accuracy: hits (the confusion-matrix diagonal) divided by
        # the total number of test images for that letter
        for letter_i, letter in enumerate(letters):
            positive = confusion_matrix[letter][letter]
            total = self.num_tests_images_per_letter
            print("Acurácia da classe {} :".format(letter), positive / total)
Example #4
def main():
    # Preparation
    config = get_parameters()
    transformer = get_time_frequency_transform(config)

    data_loader_train, data_loader_valid = get_data(config)
    audio, target, fname = data_loader_train.dataset[0]
    batch_audio, batch_target, batch_fname = next(
        iter(data_loader_train))  # Get single batch

    spec = transformer(audio)
    batch_specs = transformer(batch_audio)

    # Sanity check for shapes
    assert spec.shape[0] == 1, 'Wrong size for spec'
    assert spec.shape[1] == config.n_fft // 2 + 1, 'Wrong size for spec'
    assert spec.shape[2] == config.max_length_frames, 'Wrong size for spec'

    assert batch_specs.shape[
        0] == config.batch_size, 'Wrong size for batch_spec'
    assert batch_specs.shape[1] == 1, 'Wrong size for batch_spec'
    assert batch_specs.shape[
        2] == config.n_fft // 2 + 1, 'Wrong size for batch_spec'
    assert batch_specs.shape[
        3] == config.max_length_frames, 'Wrong size for batch_spec'

    # Manual plotting
    import matplotlib.pyplot as plt
    fig = plt.figure()
    im = plt.imshow(spec[0].numpy()[::-1, :], cmap=plt.get_cmap('magma'))
    fig.colorbar(im)
    plt.show()

    # Using librosa
    utils.show_spectrogram(spec, config)
    print(spec)

    # Librosa, by batch
    utils.show_spectrogram_batch(batch_specs, batch_fname, config, gridSize=4)
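get_time_frequency_transform is defined elsewhere in that project. Given that spec comes out as [1, n_fft // 2 + 1, max_length_frames], one plausible shape-compatible implementation is a torchaudio power spectrogram; this is a hypothetical sketch, not the original code, and hop_length is an assumed config field:

import torchaudio

def get_time_frequency_transform(config):
    # Produces [channels, n_fft // 2 + 1, frames], matching the asserts above.
    return torchaudio.transforms.Spectrogram(
        n_fft=config.n_fft,
        hop_length=getattr(config, 'hop_length', config.n_fft // 2),
        power=2.0,
    )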
Example #5
vertex_file = sys.argv[1]
edge_file = sys.argv[2]
poly_file = sys.argv[3]

# Parameters
lx = 9 * (2 / (3 * (3**0.5)))**0.5
ly = 4 * (2 / (3**0.5))**0.5
ka = 1.
A0 = 1.  # current preferred area for polygon
gamma = 0.04 * ka * A0  # hexagonal network
# gamma = 0.1 * ka * A0 # soft network
Lambda = 0.12 * ka * (A0**(3 / 2))  # hexagonal network
# Lambda = -0.85 * ka * A0**(3/2) # soft network
lmin = 0.2
delta_t = 0.05
eta = 1.

# get parameter dictionary
parameters = get_parameters(lx, ly, ka, gamma, Lambda, eta, lmin, delta_t)

# get vertices
vertices = read_vertices(vertex_file)

# get edges
edges = read_edges(edge_file)

# get polygons
poly_indices = read_poly_indices(poly_file)
polys = build_polygons(poly_indices, A0)

steepest_descent(vertices, edges, polys, parameters)
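For context, ka, gamma and Lambda read like the coefficients of the standard two-dimensional vertex-model energy that steepest_descent presumably minimizes (an inference from the "hexagonal network" / "soft network" comments, not something stated in this snippet):

E = \sum_{\alpha} \frac{k_a}{2}\,(A_\alpha - A_0)^2 + \sum_{\alpha} \frac{\gamma}{2}\,P_\alpha^2 + \sum_{\langle ij \rangle} \Lambda\, \ell_{ij}

where A_alpha and P_alpha are the area and perimeter of cell alpha and ell_ij are edge lengths. The values gamma = 0.04*ka*A0 and Lambda = 0.12*ka*A0^(3/2) correspond to the commonly cited "hexagonal network" regime, and the commented-out values match the usual "soft network" regime.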
Example #6
def main():
    # Reproducibility
    np.random.seed(12345)
    torch.manual_seed(12345)

    # Preparation
    config = get_parameters()

    # Logging configuration
    writer = None
    if config.tensorboard:
        path_tensorboard = f'{config.logging_dir}/{config.experiment_description}'
        if config.debug_mode:  # Clear tensorboard when debugging
            if os.path.exists(path_tensorboard):
                shutil.rmtree(path_tensorboard)
        writer = SummaryWriter(path_tensorboard)

    data_loader_train, data_loader_valid, data_loader_test = get_data(config)

    if config.use_time_freq:
        transforms = get_time_frequency_transform(config)
    else:
        transforms = None

    # =====================================================================
    # Visualize some data
    tmp_audio = None
    tmp_spec = None
    tmp_data, targets, _ = data_loader_train.dataset[
        0]  # audio is [channels, timesteps]

    # Is the data audio or image?
    if len(tmp_data.shape) == 2:
        tmp_audio = tmp_data
    else:
        tmp_spec = tmp_data

    if config.use_time_freq:
        tmp_spec = transforms(
            tmp_audio)  # spec is [channels, freq_bins, frames]

    if tmp_spec is not None:
        utils.show_spectrogram(tmp_spec, config)

    if writer is not None:
        if tmp_audio is not None:
            # Store 5 secs of audio
            ind = min(tmp_audio.shape[-1], 5 * config.original_fs)
            writer.add_audio('input_audio', tmp_audio[:, 0:ind], None,
                             config.original_fs)

            tmp_audios = []
            fnames = []
            for i in range(4):
                aud, _, fn = data_loader_train.dataset.dataset[i]
                fnames.append(fn)
                tmp_audios.append(aud)
            writer.add_figure(
                'input_waveform',
                utils.show_waveforms_batch(tmp_audios, fnames, config), None)

        # Analyze some spectrograms
        if tmp_spec is not None:
            img_tform = tforms_vision.Compose([
                tforms_vision.ToPILImage(),
                tforms_vision.ToTensor(),
            ])

            writer.add_image('input_spec', img_tform(tmp_spec),
                             None)  # Raw tensor
            writer.add_figure('input_spec_single',
                              utils.show_spectrogram(tmp_spec, config),
                              None)  # Librosa

            if config.use_time_freq:
                tmp_specs = []
                fnames = []
                for i in range(4):
                    aud, _, fn = data_loader_train.dataset.dataset[i]
                    tmp_specs.append(transforms(aud))
                    fnames.append(fn)

                writer.add_figure(
                    'input_spec_batch',
                    utils.show_spectrogram_batch(tmp_specs, fnames, config),
                    None)
                writer.add_figure('input_spec_histogram',
                                  utils.get_spectrogram_histogram(tmp_specs),
                                  None)
                del tmp_specs, fnames, aud, fn, i

    # Class Histograms
    if not config.dataset_skip_class_hist:
        fig_classes = utils.get_class_histograms(
            data_loader_train.dataset,
            data_loader_valid.dataset,
            data_loader_test.dataset,
            one_hot_encoder=utils.OneHot
            if config.dataset == 'MNIST' else None,
            data_limit=200 if config.debug_mode else None)
        if writer is not None:
            writer.add_figure('class_histogram', fig_classes, None)

    # =====================================================================
    # Train and Test
    solver = Solver(data_loader_train, data_loader_valid, data_loader_test,
                    config, writer, transforms)
    solver.train()
    scores, true_class, pred_scores = solver.test()

    # =====================================================================
    # Save results

    np.save(open(os.path.join(config.result_dir, 'true_class.npy'), 'wb'),
            true_class)
    np.save(open(os.path.join(config.result_dir, 'pred_scores.npy'), 'wb'),
            pred_scores)

    utils.compare_predictions(true_class, pred_scores, config.result_dir)

    if writer is not None:
        writer.close()
Example #7
SS_FIG_DIR = "../OUTPUT_BASELINE/sigma3.0"
COMPARISON_DIR = "../OUTPUT_BASELINE/sigma3.0"

ss_init = os.path.join(SS_FIG_DIR, "SS/SS_vars.pkl")
variables = pickle.load(open(ss_init, "rb"))
for key in variables:
    globals()[key] = variables[key]
# params_given = os.path.join(SS_FIG_DIR, "Saved_moments/params_given.pkl")
# variables = pickle.load(open(params_given, "rb"))
# for key in variables:
#     globals()[key] = variables[key]


#globals().update(ogusa.parameters.get_parameters_from_file())
globals().update(parameters.get_parameters(True, {}, {}, {}))
param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
             'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
             'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
             'b_ellipse', 'k_ellipse', 'upsilon',
             'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
             'mtry_params','tau_payroll', 'tau_bq',
             'retire', 'mean_income_data', 'g_n_vector',
             'h_wealth', 'p_wealth', 'm_wealth',
             'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']

variables = {}
for key in param_names:
    variables[key] = globals()[key]
for key in variables:
    globals()[key] = variables[key]
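Injecting every parameter into globals() works, but it makes the variables hard to trace. A namespace keeps the same short access pattern without mutating globals(); this is an alternative sketch, not part of the original script:

from types import SimpleNamespace

# Same data as above, accessed as p.S, p.J, ... instead of bare globals.
p = SimpleNamespace(**parameters.get_parameters(True, {}, {}, {}))
selected = {name: getattr(p, name) for name in param_names}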
Example #8
    #molecule = extraction.setup_calculations()
    """
  th = np.expand_dims(np.array([1.803-0.05, 1.118-0.05, 1.695-0.02]), 0)
  extraction.log_likelihood_density(th)
  sys.exit(0)
  """

    walkers_init = initialize_walkers(data_parameters,
                                      extraction.atom_positions)

    extraction.run_mcmc(walkers_init, data_parameters["run_limit"])


if __name__ == "__main__":

    data_parameters = get_parameters()

    q_max = [10]
    #q_max = [5, 7.5, 10, 12.5, 15, 17.5, 20]
    #sigmas = np.insert(1./(10**np.arange(11)), 0, 0.163)
    ston = [100]
    #ston = [25, 50, 100, 200, 400]
    lmk_arr = [[100, 100]]
    lmk_arr = [[25, 12.5], [25, 25], [25, 50], [25, 100], [12.5, 100],
               [50, 100]]
    options = []

    for bg in ston:
        for lg in lmk_arr:
            adm_params = copy(data_parameters["ADM_params"])
            adm_params["temperature"] = lg[0]
Example #9
lx = L[0]
ly = L[1]
ka = 1.


A0 = 1.
gamma = 0.04 * ka * A0 # hexagonal network
# gamma = 0.1 * ka * A0 # soft network
Lambda = 0.12 * ka * (A0**(3/2)) # hexagonal network
# # Lambda = -0.85 * ka * A0**(3/2) # soft network


lmin = 0.01
delta_t = 0.05


# get parameter dictionary
parameters = get_parameters(lx, ly, ka, gamma, Lambda, lmin, delta_t)

# get vertices
vertices = read_vertices(vertex_file)

# get edges
edges = read_edges(edge_file)

# get polygons
poly_indices = read_poly_indices(poly_file)
polys = build_polygons(poly_indices, A0)

steepest_descent(vertices, edges, polys, parameters, folder)
Example #10
'''
------------------------------------------------------------------------
    Create variables for SS baseline graphs
------------------------------------------------------------------------
'''

SS_FIG_DIR = "OUTPUT"
COMPARISON_DIR = "OUTPUT"

ss_init = os.path.join(SS_FIG_DIR, "SSinit/ss_init_vars.pkl")
variables = pickle.load(open(ss_init, "rb"))
for key in variables:
    globals()[key] = variables[key]

#globals().update(ogindia.parameters.get_parameters_from_file())
globals().update(parameters.get_parameters())
param_names = [
    'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
    'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
    'mindist_SS', 'mindist_TPI', 'b_ellipse', 'k_ellipse', 'upsilon',
    'chi_b_guess', 'chi_n_guess', 'etr_params', 'mtrx_params', 'mtry_params',
    'tau_payroll', 'tau_bq', 'retire', 'mean_income_data', 'g_n_vector',
    'h_wealth', 'p_wealth', 'm_wealth', 'omega', 'g_n_ss', 'omega_SS',
    'surv_rate', 'e', 'rho'
]

variables = {}
for key in param_names:
    variables[key] = globals()[key]
for key in variables:
    globals()[key] = variables[key]
Example #11
        simulator.reset(dspt_times)

    return stop_headways


def get_mean_std(headways):
    return np.around(np.mean(headways),
                     decimals=1), np.around(np.std(headways), decimals=1)


if __name__ == "__main__":
    batch_no = 1
    delta_t, sim_duration, dspt_times, \
        stop_locs, demand_rates, board_rates, stop_num, demand_start_times, \
        link_mean_speeds, link_cv_speeds, link_lengths, link_start_locs, \
        cycle_lengths, green_ratios, signal_offsets, signal_locs, examined_signal \
        = get_parameters()

    stop_headways_dict = get_result(batch_no, delta_t, sim_duration, dspt_times,
                                    stop_locs, demand_rates, board_rates, stop_num, demand_start_times,
                                    link_mean_speeds, link_cv_speeds, link_lengths, link_start_locs,
                                    cycle_lengths, green_ratios, signal_offsets, signal_locs)

    arr_means = []
    arr_stds = []
    for stop in range(stop_num):
        arr_mean, arr_std = get_mean_std(stop_headways_dict[stop])
        arr_stds.append(arr_std)
        arr_means.append(arr_mean)

    print(arr_means)
Example #12
                                 path,
                                 img_size,
                                 batch_size,
                                 shuffle=False)

    class_dict = {}

    device = torch.device(config.device)
    model = InceptionV3().to(device)

    data_iter = dataloader.loader()

    index = get_index()

    for batch_idx, (images, labels) in enumerate(data_iter):
        batch = images.to(device)
        preds = model(batch)[0]
        preds = preds.cpu().numpy().reshape((preds.shape[0], preds.shape[1]))
        index.addDataPointBatch(
            preds, range(batch_idx * batch_size, (batch_idx + 1) * batch_size))
        class_dict = save_images(images, labels, batch_idx, batch_size,
                                 class_dict, out_dir)

    create_index(index, os.path.join(out_dir, 'index.bin'))
    with open(os.path.join(out_dir, 'class_dict.json'), 'w') as fp:
        json.dump(class_dict, fp)


if __name__ == "__main__":
    config = get_parameters()
    main(config)
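get_index() and create_index() are defined elsewhere in that project; since addDataPointBatch is the nmslib API, they presumably wrap an nmslib index. A minimal sketch under that assumption (the method and space choices are illustrative, not the project's actual settings):

import nmslib

def get_index():
    # HNSW index over the InceptionV3 feature vectors.
    return nmslib.init(method='hnsw', space='cosinesimil')

def create_index(index, path):
    # Build the index from the batches added above and persist it to disk.
    index.createIndex({'post': 2}, print_progress=True)
    index.saveIndex(path)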
Example #13
'''
------------------------------------------------------------------------
    Create variables for SS baseline graphs
------------------------------------------------------------------------
'''


SS_FIG_DIR = "OUTPUT"
COMPARISON_DIR = "OUTPUT"

ss_init = os.path.join(SS_FIG_DIR, "SSinit/ss_init_vars.pkl")
variables = pickle.load(open(ss_init, "rb"))
for key in variables:
    globals()[key] = variables[key]

#globals().update(ogusa.parameters.get_parameters_from_file())
globals().update(parameters.get_parameters())
param_names = ['S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age',
             'beta', 'sigma', 'alpha', 'nu', 'Z', 'delta', 'E',
             'ltilde', 'g_y', 'maxiter', 'mindist_SS', 'mindist_TPI',
             'b_ellipse', 'k_ellipse', 'upsilon',
             'chi_b_guess', 'chi_n_guess','etr_params','mtrx_params',
             'mtry_params','tau_payroll', 'tau_bq',
             'retire', 'mean_income_data', 'g_n_vector',
             'h_wealth', 'p_wealth', 'm_wealth',
             'omega', 'g_n_ss', 'omega_SS', 'surv_rate', 'e', 'rho']

variables = {}
for key in param_names:
    variables[key] = globals()[key]
for key in variables:
    globals()[key] = variables[key]
Example #14
def set_annotate_interact(year, month, violation_code):
    # Build parameters
    params = par_fn.get_parameters()

    months = params[1]
    mo = months[month]

    # Build data
    project_path, db_path = path.project_path(), path.db_path()
    db_dir = os.path.join(project_path + db_path)
    db = create_engine('sqlite:///' + db_dir + 'mke_wibrs_db.db', echo=False)
    Session = sessionmaker(bind=db)
    session = Session()

    query = session.query(mke_wibrs_db).\
                  filter(mke_wibrs_db.date >= year + mo[0],
                         mke_wibrs_db.date <= year + mo[1],
                         getattr(mke_wibrs_db, 'code_' + violation_code) == 1,
                         mke_wibrs_db.addr != None,
                         mke_wibrs_db.zip_code != None,
                         mke_wibrs_db.ald_dist != None,
                         mke_wibrs_db.mpd_dist != None,
                         mke_wibrs_db.ald_dist != 0,
                         mke_wibrs_db.mpd_dist != 0,
                         mke_wibrs_db.voting_ward != None,
                         mke_wibrs_db.x_lon != None,
                         mke_wibrs_db.y_lat != None).all()

    incident_number, date, formatted_addr, ald_dist, mpd_dist = [], [], [], [], []

    for data in query:
        incident_number.append(data.incident_number)
        date.append('{:%Y-%m-%d %H:%M:%S}'.format(data.date))
        formatted_addr.append(data.formatted_addr)
        ald_dist.append(data.ald_dist)
        mpd_dist.append(data.mpd_dist)
    session.close()

    # Format data
    ald_dist_int = [int(i) for i in ald_dist]
    mpd_dist_int = [int(i) for i in mpd_dist]
    ald_dist_cnt, mpd_dist_cnt = Counter(ald_dist_int), Counter(mpd_dist_int)
    ald_dist_srt, mpd_dist_srt = sorted(ald_dist_cnt.items()), sorted(
        mpd_dist_cnt.items())

    # Aldermanic districts & violation counts
    ald_dist = [str(i[0]) for i in ald_dist_srt]
    ald_dist_cnt = [i[1] for i in ald_dist_srt]

    # MPD districts & violation counts
    mpd_dist = [str(i[0]) for i in mpd_dist_srt]
    mpd_dist_cnt = [i[1] for i in mpd_dist_srt]

    ald_source = ColumnDataSource(
        data=dict(ald_dist=ald_dist, ald_dist_cnt=ald_dist_cnt))
    mpd_source = ColumnDataSource(
        data=dict(mpd_dist=mpd_dist, mpd_dist_cnt=mpd_dist_cnt))
    ald_tool_tips = [('District', '@ald_dist'), ('Count', '@ald_dist_cnt')]
    mpd_tool_tips = [('District', '@mpd_dist'), ('Count', '@mpd_dist_cnt')]

    ald_p = figure(x_range=ald_dist,
                   plot_height=275,
                   plot_width=750,
                   toolbar_location='right',
                   tools='hover',
                   tooltips=ald_tool_tips)

    mpd_p = figure(x_range=mpd_dist,
                   plot_height=275,
                   plot_width=750,
                   toolbar_location='right',
                   tools='hover',
                   tooltips=mpd_tool_tips)

    ald_p.vbar(x='ald_dist',
               top='ald_dist_cnt',
               color='#0b5394',
               width=0.8,
               source=ald_source)

    mpd_p.vbar(x='mpd_dist',
               top='mpd_dist_cnt',
               color='#0b5394',
               width=0.8,
               source=mpd_source)

    ald_p.xgrid.grid_line_color, mpd_p.xgrid.grid_line_color, ald_p.toolbar.logo, mpd_p.toolbar.logo = None, None, None, None
    ald_p.y_range.start, mpd_p.y_range.start = 0, 0

    # Dataset metrics
    vio_cnt = sum(ald_dist_cnt)

    return ald_p, mpd_p, vio_cnt
Example #15
#!/usr/bin/python3
# -*- coding: UTF-8 -*-

from concurrent.futures import ProcessPoolExecutor
import pymysql as pymysql
import features as features
import parameters

if __name__ == '__main__':
    parameter_info = parameters.get_parameters()
    for key, value in parameter_info.items():
        print("[" + key + "] : " + value)

    db_source = pymysql.connect(parameter_info['hostname'],
                                parameter_info['username'],
                                parameter_info['password'],
                                parameter_info["database_source"],
                                charset='utf8mb4')
    cursor_source = db_source.cursor()

    sql = "SELECT `ID` FROM `{0}`.`{1}` WHERE ".format(
        parameter_info["database_source"], parameter_info["table_captured"])
    where_info = ""
    types = str(parameter_info["type"]).split(',')
    for index in range(0, len(types)):
        where_info += "`TYPE`='{0}'".format(types[index])
        if index != len(types) - 1:
            where_info += " OR "
    sql += where_info + ";"
    print(sql)
Example #16
                                   training_this_round, testing_this_round,
                                   fold_i))
        thread_list.append(t)

    # Starts threads
    for thread in thread_list:
        thread.start()

    for thread in thread_list:
        thread.join()


# start of execution
if __name__ == "__main__":
    # execution start time, descriptor, parameters, directories, class list and dataset list
    start_algorithm = datetime.now()
    arguments = get_arguments()
    parameters = p.get_parameters(arguments['descriptor'], 'part2' in sys.argv,
                                  arguments['neurons'], arguments['output'])
    print(parameters)
    u.create_directories(['data', 'src', 'output'])
    dataset = u.get_dataset_list(u.get_classes_list(parameters['workpath']),
                                 parameters['workpath'])
    k_fold(dataset, len(dataset), parameters, start_algorithm)
    print("Main Start Time:   \t\t\t\t\t\t{}".format(
        start_algorithm.strftime("%Y-%m-%d %H:%M:%S")))
    print("Main End Time:      \t\t\t\t\t\t{}".format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    print("Total time running: \t\t\t\t\t\t{}\n".format(datetime.now() -
                                                        start_algorithm))
Example #17
    if dump_activations and dump_path is not None:
        for idx in range(len(models)):
            save_activations(idx, activations[idx], dump_path)

    # Remove the hooks (as this was interfering with prediction ensembling)
    for idx in range(len(forward_hooks)):
        for hook in forward_hooks[idx]:
            hook.remove()

    # print("selective activations returned are", activations)
    return activations


if __name__ == '__main__':

    args = parameters.get_parameters(options_type='mnist_act', deprecated=True)

    config = vgg_hyperparams.config

    model_list = os.listdir(ensemble_dir)
    num_models = len(model_list)

    train_loader, test_loader = cifar_train.get_dataset(config)

    # Load models
    models = []
    for idx in range(num_models):
        print("Path is ", ensemble_dir)
        print("loading model with idx {} and checkpoint_type is {}".format(
            idx, checkpoint_type))
        models.append(
Example #18
def kde_interact():

    try:

        project_path, sessions_path = path.project_path(), path.sessions_path()
        sessions_dir = os.path.join(project_path + sessions_path)

        id = []

        with open(sessions_dir, 'rt') as sessions_r:
            active = csv.reader(sessions_r, delimiter=',')
            for _id in active:
                if session['username'] in _id[0]:
                    id.append(session['username'])

        if session['username'] in id:

            params = par_fn.get_parameters()

            years, months, codes, kernels, bandwidths, metrics = params[0],\
                                                                 params[1],\
                                                                 params[2],\
                                                                 params[3],\
                                                                 params[4],\
                                                                 params[5]

            months, codes, metrics = [month for month in months.keys()],\
                                     [code for code in codes.keys()],\
                                     [metric for metric in metrics.keys()]

            kde = fn.set_kde_startup()
            kde_script, kde_div = components(kde)

            try:

                session_status = 'App ready.'

                if len(request.form) != 0:
                    post_year = str(request.form['kde_form_data_year_select'])
                    post_month = str(
                        request.form['kde_form_data_month_select'])
                    post_violation = str(
                        request.form['kde_form_data_code_select'])
                    post_kernel = str(
                        request.form['kde_form_data_kernel_select'])
                    post_bandwidth = str(
                        request.form['kde_form_data_bandwidth_select'])
                    post_metric = str(
                        request.form['kde_form_data_metric_select'])

                    form_input = [
                        post_year, post_month, post_violation, post_kernel,
                        float(post_bandwidth), post_metric
                    ]

                    codes_post = params[2]

                    post_violation = codes_post[post_violation]

                    kde = fn.set_kde_interact(post_year, post_month,
                                              post_violation, post_kernel,
                                              post_bandwidth, post_metric)

                    datasets = ds_fn.set_annotate_interact(
                        post_year, post_month, post_violation)

                    ald_dist, mpd_dist = datasets[0], datasets[1]

                    kde_script, kde_div = components(kde)
                    ald_script, ald_div = components(ald_dist)
                    mpd_script, mpd_div = components(mpd_dist)
                    vio_cnt = datasets[2]

                    return render_template('mkecs-kde_result.html',
                                           kde_script=kde_script,
                                           kde_div=kde_div,
                                           ald_script=ald_script,
                                           ald_div=ald_div,
                                           mpd_script=mpd_script,
                                           mpd_div=mpd_div,
                                           months=months,
                                           years=years,
                                           codes=codes,
                                           kernels=kernels,
                                           bandwidths=bandwidths,
                                           metrics=metrics,
                                           form_input=form_input,
                                           session_status=session_status,
                                           session_id=session['username'],
                                           vio_cnt=vio_cnt)
            except:

                session_status = 'All fields must be selected.'

                kde = fn.set_kde_startup()
                kde_script, kde_div = components(kde)

                return render_template('mkecs-kde.html',
                                       kde_script=kde_script,
                                       kde_div=kde_div,
                                       months=months,
                                       years=years,
                                       codes=codes,
                                       kernels=kernels,
                                       bandwidths=bandwidths,
                                       metrics=metrics,
                                       session_status=session_status,
                                       session_id=session['username'])

            session_status = 'All fields must be selected.'

            return render_template('mkecs-kde.html',
                                   kde_script=kde_script,
                                   kde_div=kde_div,
                                   months=months,
                                   years=years,
                                   codes=codes,
                                   kernels=kernels,
                                   bandwidths=bandwidths,
                                   metrics=metrics,
                                   session_status=session_status,
                                   session_id=session['username'])
    except:

        return redirect(url_for('mkecs_kde_session'))
Example #19
 def get(self):
     return get_parameters()
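The surrounding class is not shown; if this get() lives on a flask_restful Resource (an assumption), wiring it up would look roughly like the following, with get_parameters() expected to return something JSON-serializable:

from flask import Flask
from flask_restful import Api, Resource

# get_parameters() is assumed to come from the example's own module.
class Parameters(Resource):
    def get(self):
        return get_parameters()

app = Flask(__name__)
api = Api(app)
api.add_resource(Parameters, '/parameters')  # illustrative route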
Example #20
                print(fName)
                with h5py.File(fName, 'w') as hf:
                    hf.create_dataset("q", data=q_map)
                    hf.create_dataset("atm_diffraction", data=atm_diffraction)
                    hf.create_dataset("mol_diffraction", data=mol_diffraction)
                    hf.create_dataset("mol_diffraction_raw",
                                      data=mol_diffraction_raw)
                    hf.create_dataset("mod_diffraction",
                                      data=mol_diffraction / atm_diffraction)
                    hf.create_dataset("detector_distance", data=detector_dist)


if __name__ == '__main__':
    FLAGS = parser.parse_args()
    if FLAGS.molecule is not None:
        params = get_parameters(FLAGS.run, FLAGS.molecule)
    else:
        params = get_parameters(FLAGS.run)
    if FLAGS.molecule is not None:
        params.molecule = FLAGS.molecule
    FLAGS = setup(parser, output_dir=params.simOutputDir)

    # Flag handling
    if FLAGS.calculation_type is not None:
        params.calculation_type = FLAGS.calculation_type
    if FLAGS.xyz_file is not None:
        params.xyz_file = FLAGS.xyz_file
    if FLAGS.output_fileName_suffix is not None:
        params.output_fileName_suffix = FLAGS.output_fileName_suffix
    if FLAGS.basis_folder is not None:
        params.basis_folder = FLAGS.basis_folder
Example #21
            total += target_var.size(0)
            correct += (predicted == target_var).sum().item()

    return 100 * correct / total


def save_checkpoint(state, is_best, dirpath, epoch):
    filename = 'checkpoint.{}.ckpt'.format(epoch)
    checkpoint_path = os.path.join(dirpath, filename)
    best_path = os.path.join(dirpath, 'best.ckpt')
    torch.save(state, checkpoint_path)
    if is_best:
        shutil.copyfile(checkpoint_path, best_path)
        print('Best Model Saved: ')
        print(best_path)



def get_current_consistency_weight(epoch):
    return args.consistency * ramps.sigmoid_rampup(epoch, args.consistency_rampup)




if __name__ == '__main__':
    args = get_parameters()

    args.device = torch.device(
        "cuda:%d" % (args.gpu_id) if torch.cuda.is_available() else "cpu")

    main(args)
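Here get_parameters() behaves like an argparse Namespace (attribute access such as args.gpu_id and args.consistency). A hypothetical minimal version of such a helper — the real project defines many more flags:

import argparse

def get_parameters():
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu-id', dest='gpu_id', type=int, default=0)
    parser.add_argument('--consistency', type=float, default=1.0)
    parser.add_argument('--consistency-rampup', dest='consistency_rampup',
                        type=int, default=30)
    return parser.parse_args()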
Example #22
def set_kde_interact(year, month, violation_code, kernel, bandwidth, metric):
    # Build parameters
    params = par_fn.get_parameters()

    codes = {'200':'Arson',
             '13':'Assault Offenses',
             '220':'Burglary/Breaking & Entering',
             '290':'Destruction/Damage/Vandalism of Property',
             '09':'Homicide Offenses',
             '23f':'Theft from Motor Vehicle',
             '120':'Robbery',
             '11_36':'Sex Offenses',
             '23':'Larceny/Theft Offenses',
             '240':'Motor Vehicle Theft'}

    months = params[1]
    mo, bw = months[month], str(bandwidth).replace('.', '')

    # Build data
    project_path, db_path = path.project_path(), path.db_path()
    db_dir = os.path.join(project_path + db_path)
    db = create_engine('sqlite:///' + db_dir + 'mke_wibrs_db.db', echo = False)
    Session = sessionmaker(bind = db)
    session = Session()

    query = session.query(mke_wibrs_db).\
                   filter(mke_wibrs_db.date >= year + mo[0],
                          mke_wibrs_db.date <= year + mo[1],
                          getattr(mke_wibrs_db, 'code_' + violation_code) == 1,
                          mke_wibrs_db.addr != None,
                          mke_wibrs_db.zip_code != None,
                          mke_wibrs_db.ald_dist != None,
                          mke_wibrs_db.mpd_dist != None,
                          mke_wibrs_db.voting_ward != None,
                          mke_wibrs_db.x_lon != None,
                          mke_wibrs_db.y_lat != None).all()

    x_lon, y_lat, incident_number, date, formatted_addr, ald_dist, mpd_dist = [], [], [], [], [], [], []

    for data in query:
        x_lon.append(data.x_lon)
        y_lat.append(data.y_lat)
        incident_number.append(data.incident_number)
        date.append('{:%Y-%m-%d %H:%M:%S}'.format(data.date))
        formatted_addr.append(data.formatted_addr)
        ald_dist.append(data.ald_dist)
        mpd_dist.append(data.mpd_dist)
    session.close()

    # Build plot & labels
    tt_src = ColumnDataSource(data = dict(x = x_lon,
                                          y = y_lat,
                                          i = incident_number,
                                          d = date,
                                          a = formatted_addr,
                                          ald = ald_dist,
                                          m = mpd_dist))

    x_range, y_range = [-88.080736, -87.839722], [42.917670, 43.19712]

    p = figure(width = 606,
               height = 700,
               x_range = x_range,
               y_range = y_range,
               tools = 'save')

    plot_title_label = Label(x = -87.89,
                             y = 43.187,
                             text = 'Milwaukee, WI',
                             text_font = 'helvetica',
                             text_font_size = '14px',
                             text_color = '#41444B',
                             text_font_style = 'bold')

    label_src = ColumnDataSource(data = dict(x = [-88.07, -88.07, -88.07, -88.07, -88.07],
                                             y = [42.955, 42.948, 42.941, 42.934, 42.927],
                                             txt = [month + ' ' + year,
                                                    codes[violation_code],
                                                    'Kernel: ' + kernel,
                                                    'Bandwidth: ' + bandwidth,
                                                    'Metric: ' + metric]))

    plot_labels = LabelSet(x = 'x',
                           y = 'y',
                           text = 'txt',
                           text_font = 'helvetica',
                           text_font_size = '8px',
                           text_color = '#41444B',
                           source = label_src)

    hs_src = ColumnDataSource(data = dict(x = [-87.93, -87.93, -87.93],
                                          y = [43.155, 43.148, 43.141],
                                          txt = ['Hotspot identification:',
                                                 '- Click the Point Draw Tool in the upper right corner',
                                                 '- Drag and drop icons on identified hotspots']))

    hs_labels = LabelSet(x = 'x',
                         y = 'y',
                         text = 'txt',
                         text_font = 'helvetica',
                         text_font_size = '12px',
                         text_color = '#41444B',
                         source = hs_src)

    p.add_layout(plot_title_label), p.add_layout(plot_labels), p.add_layout(hs_labels)
    p.xgrid.grid_line_color, p.ygrid.grid_line_color, p.toolbar.logo, = None, None, None
    p.xaxis.visible, p.yaxis.visible = False, False

    # Retrieve/create KDE
    f_yr, f_mo, f_vc = str(year), month.lower()[:3], str(violation_code)
    f_ke, f_bw, f_me = kernel.lower()[:3], str(bandwidth).replace('.', ''), metric.lower()[:3]
    plot_path_long = path.plot_path_long()
    plot_file = os.path.isfile(plot_path_long + '%s%s%s%s%s%s.png' % (f_yr, f_mo, f_vc, f_ke, f_bw, f_me))

    if plot_file:
        plot_path_short = path.plot_path_short()
        p.image_url(url = [plot_path_short + '%s%s%s%s%s%s.png' % (f_yr, f_mo, f_vc, f_ke, f_bw, f_me)],
                    x = x_range[0],
                    y = y_range[1],
                    w = x_range[1] - x_range[0],
                    h = y_range[1] - y_range[0])

    else:
        plot_path_short = path.plot_path_short()
        kde_bw = float(bandwidth)
        kde_fn.exe_kde(x_lon, y_lat, year, month, violation_code, kernel, kde_bw, metric)
        sleep(3)
        p.image_url(url = [plot_path_short + '%s%s%s%s%s%s.png' % (f_yr, f_mo, f_vc, f_ke, f_bw, f_me)],
                           x = x_range[0],
                           y = y_range[1],
                           w = x_range[1] - x_range[0],
                           h = y_range[1] - y_range[0])

    events = p.square(x = 'x',
                      y = 'y',
                      size = 20,
                      color = 'blue',
                      fill_alpha = 0.0,
                      line_color = None,
                      source = tt_src)

    tt_hover = HoverTool(tooltips = [('WIBRS', '@i'), ('Address','@a'), ('Date', '@d'), ('Ald/MPD', '@ald, @m')],
                         renderers = [events])
    p.add_tools(tt_hover)

    # Hotspot identification
    hs_src = cds({'x': [-87.9305, -87.911, -87.8915, -87.872, -87.8525],
                  'y': [43.17, 43.17, 43.17, 43.17, 43.17]})

    hs_render = p.circle_cross(x = 'x',
                               y = 'y',
                               size = 40,
                               color = 'green',
                               fill_alpha = 0.25,
                               line_width = 1,
                               source = hs_src)

    hs_plot = PointDrawTool(renderers = [hs_render],
                            empty_value = None,
                            add = False)

    p.add_tools(hs_plot)

    p.toolbar.active_tap = hs_plot

    return p
Example #23
    def __init__(self,pc_file_u,pc_file_v,
                 covfile,
                 covfile_sublayer=None,
                 pc_size=-1,
                 params={},
                 preset=None):
        """
        Initialize PCAFlow object.

        Parameters
        ----------
        pc_file_u, pc_file_v : string
            Files containing the principal components in horizontal and
            vertical direction, respectively.
            These files should be .npy files, in which each row is a flattened
            principal component (i.e., the total size of these principal
            component matrices is NUM_PC x (WIDTH*HEIGHT)).

        covfile : string
            File containing the covariance matrix of size NUM_PC x NUM_PC for 
            PCA-Flow.

        covfile_sublayer : string, optional
            File containing the covariance matrix for the layers (usually
            biased towards the first PCs).
            If PCA-Layers is used and this file is not given, use covfile.

        pc_size : tuple, optional
            Size of principal components. Only required if PCs are not of size
            512x256 or 1024x436.

        params : dict, optional
            Parameters. See parameters.py for documentation of parameters.

        preset : string
            Preset with useful parameter values for different datasets.
            Can be one of
                'pcaflow_sintel'
                'pcalayers_sintel'
                'pcaflow_kitti'
                'pcalayers_kitti'

        """

        np.random.seed(1)

        self.params = defaults.get_parameters(params,preset)

        cprint('[PCAFlow] Initializing.', self.params)

        NC = int(self.params['NC'])
        self.NC = NC

        pc_u = np.load(pc_file_u)
        pc_v = np.load(pc_file_v)
        cov_matrix = np.load(covfile).astype('float32')

        if covfile_sublayer is not None:
            cov_matrix_sublayer = np.load(covfile_sublayer).astype('float32')
        else:
            cov_matrix_sublayer = None
       
        pc_w = 0
        pc_h = 0

        if pc_size==-1:
            # Try to guess principal component dimensions
            if pc_u.shape[1] == 1024*436:
                cprint('[PCAFLOW] Using PC dimensionality 1024 x 436', self.params)
                pc_w = 1024
                pc_h = 436
            elif pc_v.shape[1] == 512*256:
                cprint('[PCAFLOW] Using PC dimensionality 512 x 256', self.params)
                pc_w = 512
                pc_h = 256
            else:
                print('[PCAFLOW] *** ERROR *** ')
                print('[PCAFLOW] Could not guess dimensionality of principal components.')
                print('[PCAFLOW] Please provide as parameter.')
                sys.exit(1)


        self.PC = []

        # Smooth principal components.
        self.pc_u = self.filter_pcs(pc_u,(pc_w,pc_h)).astype('float32')
        self.pc_v = self.filter_pcs(pc_v,(pc_w,pc_h)).astype('float32')

        self.cov_matrix = cov_matrix
        
        self.pc_w = pc_w
        self.pc_h = pc_h

        self.reshape_features=True

        ###############################
        # Feature matcher
        ###############################
        if self.params['features'].lower() == 'libviso' and libviso_available:
            self.feature_matcher = FeatureMatcherLibviso(self.params)
        elif self.params['features'].lower() == 'orb':
            self.feature_matcher = FeatureMatcherORB(self.params)
        elif self.params['features'].lower() == 'fast':
            self.feature_matcher = FeatureMatcherFast(self.params)
        elif self.params['features'].lower() == 'akaze' or not libviso_available:
            self.feature_matcher = FeatureMatcherAKAZE(self.params)
        else:
            print('[PCAFLOW] *** ERROR ***')
            print('[PCAFLOW] Unknown feature type {}. Please use "libviso" or "fast".'.format(self.params['features']))
            sys.exit(1)

        if self.params['n_models'] <= 1:
            ##############################
            # Solver for PCA-Flow
            ##############################
            self.solver = RobustQuadraticSolver(self.pc_u,
                                                self.pc_v,
                                                self.cov_matrix,
                                                pc_size=(pc_w,pc_h),
                                                params=self.params)


        else:
            ############################## 
            # Solver for PCA-Layers
            ##############################  
            self.solver = EMSolver(self.pc_u, self.pc_v,
                                   self.cov_matrix,
                                   pc_size = (pc_w,pc_h),
                                   params=self.params,
                                   cov_matrix_sublayer=cov_matrix_sublayer)

        self.images = deque(maxlen=2)

        cprint('[PCAFLOW] Finished initializing.',self.params)
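Going only by the docstring above (and assuming the class is indeed named PCAFlow), construction would look roughly like this; the .npy file names are placeholders, not paths from the project:

# Hypothetical file names; the expected .npy layout is described in the docstring.
flow = PCAFlow('pc_u.npy', 'pc_v.npy',
               covfile='cov.npy',
               preset='pcaflow_sintel')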
Example #24
import parameters
import config
import market
import energy_source
import mpc_solver
import matplotlib.pyplot as plt
import json
import numpy as np
from time import sleep
if True:
    data = parameters.get_parameters()

    config = config.Config(**data['config'])
    energy_sources = [energy_source.EnergySource(**kwargs) for kwargs in data['energy_sources']]
    for ess in energy_sources:
        ess.tuning_parameter_fit()
    markets = [market.Market(**kwargs) for kwargs in data['markets']]
    algo2 = mpc_solver.MPCSolver(config=config, markets=markets, energy_sources=energy_sources)

    results = algo2.solve([[1.80, 1.81] for i in range(len(markets))], ['free', 'free', 'free'])

    with open('t.json', 'w') as f:
        json.dump(results, f)

with open('t.json', 'r') as f:
    results = json.load(f)
      
for key in results[0].keys():
    values = []
    for record in results:
        values.append(record[key])
Example #25
            parameters["database_distribution"], source_id, col_names, col_values,
            ks_ipd[1], kld_ipd, ttest_ipd[1], whitney_ipd[1], ansari_ipd[1], wasserstein_ipd, energy_ipd,
            ks_count[1], kld_count, ttest_count[1], whitney_count[1], ansari_count[1], wasserstein_count, energy_count,
            sourceID, paraID)

        # print(sql)
        cursor_distribution.execute(sql)

    db_distribution.commit()
    cursor_distribution.close()
    db_distribution.close()


if __name__ == '__main__':
    print("Start Test")
    parameters = parameters.get_parameters()
    db_distribution = pymysql.connect(parameters['hostname'],
                                      parameters['username'],
                                      parameters['password'],
                                      parameters["database_distribution"],
                                      charset='utf8mb4')
    cursor_distribution = db_distribution.cursor()

    sql = "SELECT `IPDDIS`,`COUNTDIS` FROM `{0}`.`distribution_31` " \
          "WHERE `PARAID`='0' AND `SOURCEID`='31' LIMIT 1;".format(parameters["database_distribution"])
    cursor_distribution.execute(sql)
    cursor_result = cursor_distribution.fetchone()
    referenceIPDValues_excellent = dict_to_dis_list(
        json_map_to_dict(cursor_result[0]))
    referenceCountValues_excellent = dict_to_dis_list(
        json_map_to_dict(cursor_result[1]))
Example #26
import patterns
import initialisation
import iteration
import correlations

import matplotlib as mpl
import matplotlib.pyplot as plt
if os.environ.get('DISPLAY', '') == '':
    print('no display found. Using non-interactive Agg backend')
    mpl.use('Agg')

# plt.ion()

dt, tSim, N, S, p, num_fact, p_fact, dzeta, a_pf, eps, cm, a, U, T, w, \
    tau_1, tau_2, tau_3_A, tau_3_B, g_A, beta, g, t_0, tau, cue_ind, \
    random_seed = get_parameters()

cue_ind = 0
rd.seed(random_seed)

lambdas = np.nan * np.ones((int(N * a), int(N * a)))

max_C1 = 2 * int(N * a * a / S)
max_C2 = int(2 * N * a * a * (S - 1) / S)
n_C1 = min(max_C1 + 1, 25)
n_C2 = min(max_C2 + 1, 25)

ksi_i_mu, delta__ksi_i_mu__k = patterns.get_uncorrelated()

for C2 in np.linspace(0, 20, 21, dtype=int):
    for C1 in np.linspace(0, 10, 11, dtype=int):
Example #27
def run():
    if sys.platform == 'win32':
        bar_value = '#'
    else:
        bar_value = '█'

    start = datetime.now()
    path = './data/database.db'
    conn = sqlite3.connect(path)

    c = conn.cursor()
    c.execute('''CREATE TABLE IF NOT EXISTS treinamento (image_name TEXT PRIMARY KEY,
                                                          HOG BLOB,
                                                          LBP BLOB)''')

    c.execute('''CREATE TABLE IF NOT EXISTS testes (image_name TEXT PRIMARY KEY,
                                                     HOG BLOB,
                                                     LBP BLOB)''')

    hog = p.get_parameters('HOG', True)
    lbp = p.get_parameters('LBP', True)

    try:
        values = {}

        values['HOG'] = imagelib.getHog('./data/img_test.png',
                                        hog['descriptor_param_1'],
                                        hog['descriptor_param_2'],
                                        hog['descriptor_param_3'])

        values['LBP'] = imagelib.getLBP('./data/img_test.png',
                                        lbp['descriptor_param_1'],
                                        lbp['descriptor_param_2'])

        c.execute('INSERT OR IGNORE INTO treinamento VALUES (?, ?, ?)',
                  ['img_test.png', values['HOG'], values['LBP']])

    except Exception:
        raise

    dataset = u.get_dataset_list(
        u.get_classes_list('./data/dataset2/treinamento'),
        './data/dataset2/treinamento')
    count = 1

    for line in dataset:
        widgets = [
            'Training Letter: {} | '.format(u.get_letter(line[0], True)),
            progressbar.Percentage(), ' (',
            progressbar.Counter(), ' of ',
            str(len(line)), ') ',
            progressbar.Bar(bar_value), '  ',
            progressbar.ETA()
        ]
        with progressbar.ProgressBar(widgets=widgets,
                                     max_value=len(line)) as bar:
            for i, name in enumerate(line):
                values = {}

                try:
                    values['HOG'] = imagelib.getHog(hog['workpath'] + name,
                                                    hog['descriptor_param_1'],
                                                    hog['descriptor_param_2'],
                                                    hog['descriptor_param_3'])

                    values['LBP'] = imagelib.getLBP(lbp['workpath'] + name,
                                                    lbp['descriptor_param_1'],
                                                    lbp['descriptor_param_2'])

                    c.execute(
                        'INSERT OR IGNORE INTO treinamento VALUES (?, ?, ?)',
                        [name, values['HOG'], values['LBP']])
                except Exception:
                    raise

                count += 1
                bar.update(i)

        conn.commit()

    dataset = u.get_dataset_list(u.get_classes_list('./data/dataset2/testes'),
                                 './data/dataset2/testes')

    for line in dataset:
        widgets = [
            'Testing Letter: {} | '.format(u.get_letter(line[0], True)),
            progressbar.Percentage(), ' (',
            progressbar.Counter(), ' of ',
            str(len(line)), ') ',
            progressbar.Bar(bar_value), '  ',
            progressbar.ETA()
        ]
        with progressbar.ProgressBar(widgets=widgets,
                                     max_value=len(line)) as bar:
            for i, name in enumerate(line):
                values = {}

                try:
                    values['HOG'] = imagelib.getHog(hog['testpath'] + name,
                                                    hog['descriptor_param_1'],
                                                    hog['descriptor_param_2'],
                                                    hog['descriptor_param_3'])

                    values['LBP'] = imagelib.getLBP(lbp['testpath'] + name,
                                                    lbp['descriptor_param_1'],
                                                    lbp['descriptor_param_2'])

                    c.execute('INSERT OR IGNORE INTO testes VALUES (?, ?, ?)',
                              [name, values['HOG'], values['LBP']])
                except Exception:
                    raise

                count += 1
                bar.update(i)
        conn.commit()
    c.close()
    end = datetime.now()
    print('\nTotal of images described: {} + test image.'.format(
        str(count - 1).zfill(5)))
    print('Database in {}\n'.format(path))
    print("Start Time: {}".format(start.strftime("%d/%m/%Y %H:%M:%S")))
    print("End Time: {}".format(end.strftime("%d/%m/%Y %H:%M:%S")))
    print('Total Elapsed Time: {}\n'.format(end - start))
Example #28
'''

SS_FIG_DIR = "../OUTPUT_BASELINE/sigma3.0"
COMPARISON_DIR = "../OUTPUT_BASELINE/sigma3.0"

ss_init = os.path.join(SS_FIG_DIR, "SS/SS_vars.pkl")
variables = pickle.load(open(ss_init, "rb"))
for key in variables:
    globals()[key] = variables[key]
# params_given = os.path.join(SS_FIG_DIR, "Saved_moments/params_given.pkl")
# variables = pickle.load(open(params_given, "rb"))
# for key in variables:
#     globals()[key] = variables[key]

#globals().update(ogusa.parameters.get_parameters_from_file())
globals().update(parameters.get_parameters(True, {}, {}, {}))
param_names = [
    'S', 'J', 'T', 'BW', 'lambdas', 'starting_age', 'ending_age', 'beta',
    'sigma', 'alpha', 'nu', 'Z', 'delta', 'E', 'ltilde', 'g_y', 'maxiter',
    'mindist_SS', 'mindist_TPI', 'b_ellipse', 'k_ellipse', 'upsilon',
    'chi_b_guess', 'chi_n_guess', 'etr_params', 'mtrx_params', 'mtry_params',
    'tau_payroll', 'tau_bq', 'retire', 'mean_income_data', 'g_n_vector',
    'h_wealth', 'p_wealth', 'm_wealth', 'omega', 'g_n_ss', 'omega_SS',
    'surv_rate', 'e', 'rho'
]

variables = {}
for key in param_names:
    variables[key] = globals()[key]
for key in variables:
    globals()[key] = variables[key]