Code Example #1
import logging
import os

import pandas as pd

# `get_date_formats` and `generate_data` are assumed to come from this
# project's own helper module.

logger = logging.getLogger(__name__)


def main(args):

    dates_list = get_date_formats(
        start_date=f"{args.year}-01-01",
        end_date=f"{args.year}-12-01",
        freq=args.freq,
        sep=args.sep,
    )
    logger.info(dates_list)
    claims_file_location = os.path.join(os.getcwd(), args.dir)

    if not os.path.exists(claims_file_location):
        os.mkdir(claims_file_location)
    for date in dates_list:
        claims_file = f"claimsdata_{date}.txt"
        final_file_location = os.path.join(claims_file_location, claims_file)
        logger.info(final_file_location)
        claims_data = pd.DataFrame(generate_data(n=args.num, date_of_service=date))
        claims_data.to_csv(
            final_file_location, header=False, index=False, sep="\t", encoding="utf-8"
        )
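The example above reads everything from an `args` namespace (`year`, `freq`, `sep`, `dir`, `num`); a minimal argparse wiring is sketched below. The flag names mirror the attributes used in `main`, but the defaults are hypothetical.

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Write one synthetic claims file per date")
    parser.add_argument("--year", default="2021")
    parser.add_argument("--freq", default="MS", help="date-range frequency passed to get_date_formats")
    parser.add_argument("--sep", default="-", help="separator passed to get_date_formats")
    parser.add_argument("--dir", default="claims", help="output directory under the current working directory")
    parser.add_argument("--num", type=int, default=100, help="rows per file")
    main(parser.parse_args())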
Code Example #2
import tensorflow as tf
import keras.backend.tensorflow_backend

# TF1-era setup: cap this process at 55% of GPU memory.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.55)
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
keras.backend.tensorflow_backend.set_session(session)

import matplotlib.pyplot as plt
from keras.models import model_from_json
import numpy as np
from functions import file_list, generate_data
from scipy import stats

train_file, train_label, validation_file, validation_label, test_file, test_label = file_list()
train = generate_data(directory='/home/ekcontar/dat/',
                      mode='augmentation',
                      shuffle=True,
                      batch_size=10,
                      file_list=train_file,
                      label=train_label)
validation = generate_data(directory='/home/ekcontar/dat/',
                           mode='rescale',
                           shuffle=True,
                           batch_size=10,
                           file_list=validation_file,
                           label=validation_label)
test = generate_data(directory='/home/ekcontar/dat/',
                     mode='rescale',
                     shuffle=False,
                     batch_size=10,
                     file_list=test_file,
                     label=test_label)
print('here I performed the step I called test')
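This session setup targets TensorFlow 1.x with standalone Keras (`keras.backend.tensorflow_backend` was removed in later releases). Under TensorFlow 2.x the same 55% memory cap can be expressed through the compat layer; a minimal sketch:

import tensorflow as tf

# TF2 equivalent of the TF1 session setup above, via tf.compat.v1.
gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.55)
config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
tf.compat.v1.keras.backend.set_session(tf.compat.v1.Session(config=config))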
Code Example #3
import tensorflow as tf
import keras.backend.tensorflow_backend
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.55)
session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
keras.backend.tensorflow_backend.set_session(session)

import matplotlib.pyplot as plt
from keras.models import model_from_json
import numpy as np
from functions import file_list, generate_data

# `cf` below is assumed to be the project's configuration module
# (e.g. `import config as cf`).

train_file, train_label, validation_file, validation_label, test_file, test_label = file_list()
train = generate_data(directory=cf.DATA_CONFIG['data_folder'] + 'image_data/',
                      mode='augmentation',
                      shuffle=True,
                      batch_size=10,
                      file_list=train_file,
                      label=train_label)
validation = generate_data(directory=cf.DATA_CONFIG['data_folder'] +
                           'image_data/',
                           mode='rescale',
                           shuffle=True,
                           batch_size=10,
                           file_list=validation_file,
                           label=validation_label)
test = generate_data(directory=cf.DATA_CONFIG['data_folder'] + 'image_data/',
                     mode='rescale',
                     shuffle=False,
                     batch_size=10,
                     file_list=test_file,
                     label=test_label)
Code Example #4
'''
This is the "main" file, and is where the actual architecture is defined.
Additionally, this is where the batch iteration takes place, and where
the learning rates, number of epochs, and other parameters are defined.
'''

import matplotlib.pyplot as plt
import torch

# `config` and `generate_data` are assumed to come from this project's
# own modules.

# ----- Debugging parameters -----
config.show_calls = False
config.show_shapes = False
torch.set_grad_enabled(False)
torch.set_default_dtype(torch.float64)

# ----- Loading the data -----
train_features, train_labels = generate_data(1000)
test_features, test_labels = generate_data(1000)

#  ----- Define the parameters for learning -----
nb_classes = train_labels.shape[0]
features = train_features.size(1)
nb_samples = train_features.size(0)
epsilon = 0.1
eta = 0.2  # nb_samples is now defined in Sequential()
batch_size = config.batch_size
epochs = int(config.epochs / (nb_samples / batch_size))

# Zeta makes this work correctly with the Sigma activation function.
# train_label = train_label.add(0.125).mul(0.8)
# test_label = test_label.add(0.125).mul(0.8)
Code Example #5
from functions import chip_clas, remove_noise, generate_data
import pandas as pd
import numpy as np

results = []
runtimes = []

for d in range(2, 15, 2):

    X, y = generate_data(d=d, nrow=100, mean1=3, mean2=6, sd1=0.5, sd2=0.5)

    # Filtering data:
    X_new, y_new = remove_noise(X, y)

    # Comparing methods:
    method = ["nn_clas", "parallel", "extreme_search"]

    for model in method:
        y_hat, y_test, result, runtime = chip_clas(X_new,
                                                   y_new,
                                                   method=model,
                                                   kfold=5)

        print(
            " \n Dimension: {0}\n Method: {1} \n Average AUC: {2:.4f} \n Std. Deviation: {3:.4f} \n Average Runtime: {4:.4f} \n"
            .format(d, model,
                    result.mean()[0],
                    result.std()[0],
                    runtime.mean()[0]))

        results.append(result.mean()[0])
        runtimes.append(runtime.mean()[0])
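With the appends above, the loop leaves one mean per (dimension, method) pair; a small summary table could then be built. A sketch, assuming pandas as imported above and the scalar means collected in `results` and `runtimes`:

# Tabulate the collected means, one row per (dimension, method) pair.
summary = pd.DataFrame(
    {"avg_auc": results, "avg_runtime": runtimes},
    index=pd.MultiIndex.from_product(
        [range(2, 15, 2), ["nn_clas", "parallel", "extreme_search"]],
        names=["dimension", "method"]))
print(summary)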
Code Example #6
    # The opening of this snippet was truncated; the first widget is
    # presumably a slider for the number of arms used below.
    num_arm = input_form.slider("Number of Arms",
                                min_value=2,
                                max_value=7,
                                value=4)
    num_obs = input_form.slider("Number of Observations",
                                min_value=100,
                                max_value=10000,
                                value=1000,
                                step=100)
    arm_prob_dist = input_form.radio("Probability Across Arms",
                                     ("Random (similar across arms)", "Bias"))
    simulate_button = input_form.form_submit_button("Simulate")

    display_col.markdown("### Arms Movement")

    if simulate_button:
        df, probs = generate_data(num_arm, num_obs, arm_prob_dist)
        slot_selected, rewards, penalties, total_reward, beta_params = single_step_sim(
            df)

        input_col.write(df.head(20))
        display_col.write(probs)
        display_col.write(beta_params.tail(20))
        fig, axs = plot_arm_dist(beta_params)
        display_col.write(fig)

        snapshot_dist, unpivot_dist = get_df_distribution(beta_params)
        last_snapshot = unpivot_dist[unpivot_dist['iteration'] ==
                                     unpivot_dist['iteration'].max()]
        # fig = px.line(last_snapshot, x="x", y="y_values", color="y_group",
        #               title='Arm Movement (iteration %d)' % unpivot_dist['iteration'].max())
        # display_col.write(fig)
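This snippet assumes a Streamlit page where the form and the two columns already exist. A minimal sketch of that scaffolding, reusing the names from the code above (the form key is hypothetical):

import streamlit as st

# Hypothetical scaffolding for the names used in the snippet.
input_col, display_col = st.columns(2)
input_form = input_col.form("simulation_inputs")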
Code Example #7
import random
import sys
import time

# `fun` is assumed to be this project's helper module and `ampl` an
# amplpy.AMPL instance created elsewhere, e.g.:
#   import functions as fun
#   from amplpy import AMPL
#   ampl = AMPL()

option = int(sys.argv[1])
num_points = int(sys.argv[2])
seed = int(sys.argv[3])
nu = float(sys.argv[4])
data_type = int(sys.argv[5])
'''
Here we generate the two datasets:
    The training set, used to build the model, i.e., w and gamma.
    The test set, used to classify new points with that model and see
    how well it classifies points it was not fitted on.
'''

print("\nCalculando resultados...\n")

# Generate the training data
Atr, ytr = fun.generate_data(num_points, seed, data_type, False)
fun.write_ampl(Atr, ytr, nu, option)

# If RBF is not used, generate separate test data

if data_type != 3:
    random.seed(time.time())
    seed2 = random.randint(0, 10**6)
else:
    seed2 = seed
Ate, yte = fun.generate_data(num_points, seed2, data_type, True)

# Read the model and the data
if option == 1:
    ampl.read('./primal.mod')
else:
    ampl.read('./dual.mod')
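A hypothetical invocation, matching the five positional arguments parsed at the top (the script name is assumed):

    python classify.py 1 100 42 0.5 1

This would solve the primal model (option == 1) on 100 generated points of data_type 1, seeded with 42 and nu = 0.5; since data_type != 3, the test set gets a fresh time-based seed.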
Code Example #8
import sys

# `f`, `vf`, `nf`, and `wf` are assumed to be this project's helper
# modules (clone/report helpers and verb, noun, and word extractors).

project_folders = [
    'django',
    'flask',
    'pyramid',
    'reddit',
    'requests',
    'sqlalchemy',
]

if sys.argv[1] == 'clone':
    f.clone_repository(sys.argv[2])
elif sys.argv[1] == 'vnwords':
    if sys.argv[2] == 'verbs':
        f.generate_report(
            f.generate_data(project_folders, vf.get_top_verbs_in_path),
            sys.argv[3])
    elif sys.argv[2] == 'nouns':
        f.generate_report(
            f.generate_data(project_folders, nf.get_top_nouns_in_path),
            sys.argv[3])
    else:
        print('enter a valid argument')
elif sys.argv[1] == 'allwords':
    if sys.argv[2] == 'func':
        f.generate_report(
            f.generate_data(project_folders, wf.get_functions_words),
            sys.argv[3])
    elif sys.argv[2] == 'vars':
        f.generate_report(
            f.generate_data(project_folders, wf.get_vars_words),
            sys.argv[3])
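A hypothetical invocation, assuming this dispatcher lives in main.py and that the third argument names the report destination:

    python main.py vnwords verbs report.txt

This would collect the top verbs in each project folder via vf.get_top_verbs_in_path and pass the result to f.generate_report.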