Example #1
def update_teamwinner_styles(prepost_or_year, league, teamname, year):
    if not teamname:
        return {"display": "none"}, {"display": "none"}

    if prepost_or_year == "prepost":
        # The lines below make the team graph disappear when the league is changed
        df_pre, df_post = read_data(prepost_or_year, league, year)
        df = pd.concat([df_pre, df_post])
        all_teams = np.sort(
            df["homeTeamName"].unique()
        )  # Assuming all teams have played home at least once
        if teamname not in all_teams:
            return {"display": "none"}, {"display": "none"}

        style_double_teamwinner_div = {"display": "block"}
        style_single_teamwinner_div = {"display": "none"}
        return style_single_teamwinner_div, style_double_teamwinner_div
    elif prepost_or_year == "year":
        # The lines below make the team graph disappear when the league is changed
        df = read_data(prepost_or_year, league, year)
        all_teams = np.sort(
            df["homeTeamName"].unique()
        )  # Assuming all teams have played home at least once
        if teamname not in all_teams:
            return {"display": "none"}, {"display": "none"}

        style_single_teamwinner_div = {"display": "block"}
        style_double_teamwinner_div = {"display": "none"}
        return style_single_teamwinner_div, style_double_teamwinner_div
Example #2
def set_teamselector_options(prepost_or_year, league, year):
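    # Build the team-selector dropdown options from every team that appears as a home team in the selected data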
    if prepost_or_year == "prepost":
        df_pre, df_post = read_data(prepost_or_year, league, year)
        df = pd.concat([df_pre, df_post])
    elif prepost_or_year == "year":
        df = read_data(prepost_or_year, league, year)

    return [
        {"label": team, "value": team} for team in np.sort(df["homeTeamName"].unique())
    ]
Example #3
def update_single_winner_graph(prepost_or_year, league, year):
    if prepost_or_year == "prepost":
        raise PreventUpdate

    df = read_data(prepost_or_year, league, year)
    df_winner = (
        df["winner"]
        .value_counts()
        .reindex(REORDERLIST)
        .rename_axis("Winning team")
        .reset_index(name="Counts")
    )
    fig = go.Figure(
        data=[
            go.Bar(
                x=df_winner["Winning team"], y=df_winner["Counts"], marker_color=COLORS
            )
        ]
    )

    update_axes(fig)

    winner_text = f"## Total number of home wins, draws and away wins in the {league}"

    return fig, winner_text
Example #4
def update_single_teamwinner_graph(prepost_or_year, league, year, teamname):
    if prepost_or_year == "prepost":
        raise PreventUpdate
    elif prepost_or_year == "year":
        df = read_data(prepost_or_year, league, year)

    all_teams = np.sort(
        df["homeTeamName"].unique()
    )  # Assuming all teams have played home at least once

    if teamname not in all_teams:
        return {}, ""

    dff = df[["winner", "homeTeamName", "awayTeamName"]]
    df_teams = pd.DataFrame(
        0, index=["HOME_TEAM", "AWAY_TEAM", "DRAW"], columns=list(all_teams)
    )
    df_teams = fill_df_teams(dff, df_teams)

    teamwinner_graph = go.Figure(
        data=[
            go.Bar(
                x=df_teams[teamname].index,
                y=df_teams[teamname].tolist(),
                marker_color=COLORS,
            )
        ]
    )

    update_axes(teamwinner_graph)

    teamwinner_text = f"### Home wins, draws and away wins for {teamname}"

    return teamwinner_graph, teamwinner_text
Example #5
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()

    ap.add_argument("-d",
                    "--data_dir",
                    required=True,
                    help="Path to the images directory")
    ap.add_argument("-m",
                    "--model_path",
                    required=True,
                    help="Path to the the model")
    ap.add_argument("-i",
                    "--input",
                    type=int,
                    required=True,
                    default=299,
                    help="The input size")
    ap.add_argument("-o",
                    "--output",
                    required=True,
                    help="Path to the output file")

    args = vars(ap.parse_args())
    size = args['input']

    # model
    print("Loading model...")
    subdir = args["model_path"]
    model_path = glob.glob(subdir + '*.h5')[-1]
    model = load_model(model_path)

    # data
    print("Reading data...")
    filenames, _, _ = read_data(args["data_dir"])
    n_files = len(filenames)

    # encoding
    print("Encoding images...")
    index_to_filename = {}
    filename_to_path = {}
    features = np.zeros((n_files, model.output.shape[1]))
    for i in tqdm.tqdm(range(n_files)):
        image_id = extract_image_id(filenames[i])
        index_to_filename[i] = image_id
        filename_to_path[image_id] = filenames[i]
        #print("->", image_id)
        image = load_image(filenames[i], (size, size))
        image = image.reshape((1, ) + image.shape)

        features[i] = np.squeeze(model(image))

    # save transfer values
    np.save(args["output"], features)
    with open("index_to_filename.json", "w") as f:
        json.dump(index_to_filename, f, indent=4, ensure_ascii=False)
    with open("filename_to_path.json", "w") as f:
        json.dump(filename_to_path, f, indent=4, ensure_ascii=False)
Example #6
def read_data(arg):
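    # Load the data via functions.read_data, flatten to 2-D, and return the samples from index 2000 onward as the test split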
    X, Y = functions.read_data(arg)
    X, Y = np.array(X), np.array(Y)
    X = np.reshape(X, (X.shape[0], X.shape[1]))
    Y = np.reshape(Y, (X.shape[0]))
    X_test = X[2000:, :]
    Y_test = Y[2000:]

    return X_test, Y_test
Example #7
def main():
    root = Tk()
    root.geometry("800x100")

    data = fn.read_data()
    data = data.iloc[::-1]
    query_list = []
    b = guii.db(root, data, query_list)

    root.mainloop()
Example #8
def ResNet(i):
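    # Load dataset i, reshape it to the 4-D input the network expects, split train/test at index 2000, and run predict()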
    for j in range(0, 1):
        X, Y = functions.read_data(i)
        X, Y = np.array(X), np.array(Y)
        X = np.reshape(X, (X.shape[0], X.shape[1], 1, 1))
        Y = np.reshape(Y, (X.shape[0], 1))
        X_train = X[0:2000, :, :, :]
        Y_train = Y[0:2000]
        X_test = X[2000:, :, :, :]
        Y_test = Y[2000:]
        file_name = i + "_ResNet"
        predict(X_train, Y_train, X_test, Y_test, file_name)
Example #9
def update_double_winner_graph(prepost_or_year, league, year):
    if prepost_or_year == "year":
        raise PreventUpdate

    df_pre, df_post = read_data(prepost_or_year, league, year)
    df_winner_pre = (
        df_pre["winner"]
        .value_counts()
        .reindex(REORDERLIST)
        .rename_axis("Winning team")
        .reset_index(name="Counts")
    )
    df_winner_post = (
        df_post["winner"]
        .value_counts()
        .reindex(REORDERLIST)
        .rename_axis("Winning team")
        .reset_index(name="Counts")
    )

    fig_pre = go.Figure(
        data=[
            go.Bar(
                x=df_winner_pre["Winning team"],
                y=df_winner_pre["Counts"],
                marker_color=COLORS,
            )
        ]
    )
    fig_post = go.Figure(
        data=[
            go.Bar(
                x=df_winner_post["Winning team"],
                y=df_winner_post["Counts"],
                marker_color=COLORS,
            )
        ]
    )

    update_axes(fig_pre)
    update_axes(fig_post)

    winner_text = f"## Total number of home wins, draws and away wins in the {league} before and after corona."

    return fig_pre, fig_post, winner_text
Example #10
import functions as f

attributes, data, output = f.read_data("train.txt")
# TODO: implement reading data with "Counts" value
conditional_probabilities = f.calculate_conditional_probabilities(
    attributes, data, output)
print(conditional_probabilities)
Example #11
def load_data(config):
    positions, data = fov.read_data(config)
    print('Data loaded, shape:', data.shape)
    return positions, data
Example #12
import glob
import functions

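# Parse all GPX tracks into a coordinates DataFrame and cache it as a pickle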
files = glob.glob('../GPX/*gpx')
df = functions.read_data(files)
df.to_pickle('Coordinates_data.pkl')
#functions.make_heatmap(df, radius=10, blur=15, min_opacity=0.4,
#                        save_as='heatmap.html')
Example #13
# Fs = 2560;            # Sampling frequency
# T = 1/Fs;             # Sampling period
# L = 38400;             # Length of signal
# t_ = (0:L-1)*T;        # Time vector

# h_path = r"D:/files/SPEC course/Health detection_SVM/Training/Healthy"
# u1_path = r"D:/files/SPEC course/Health detection_SVM/Training/Faulty/Unbalance 1"
# u2_path = r"D:/files/SPEC course/Health detection_SVM/Training/Faulty/Unbalance 2"
h_path = r"Training/Healthy"
u1_path = r"Training/Faulty/Unbalance 1"
u2_path = r"Training/Faulty/Unbalance 2"
test_path = r"Testing"

#time domain
h_data_t = functions.read_data(h_path)
u1_data_t = functions.read_data(u1_path)
u2_data_t = functions.read_data(u2_path)
data_t = [h_data_t, u1_data_t, u2_data_t]

# h_feature_t = functions.time_features(h_data_t)
# u1_feature_t = functions.time_features(u1_data_t)
# u2_feature_t = functions.time_features(u2_data_t)

# frequency domain
h_data_f = functions.fft_(h_data_t)
u1_data_f = functions.fft_(u1_data_t)
u2_data_f = functions.fft_(u2_data_t)

#add up features
h_feature = h_data_f
Example #14
from functions import read_data, h_demands, h_profits, tot_profits, find_next, update_demand, block_time, tot_h_dems, plot_help, kpis
import time

# Current assumptions I'm unsure about:
# - planes must leave the hub in the morning and return to the hub before the end of the day
#   (does each aircraft need to leave from and return to the hub?)
# - currently all routes can be used, not only the ones including the hub
#   (not that it matters, as all solutions only use routes through the hub)

# Have fun checking it, have a nice holiday, and well done for finishing the last assignment in time.

s = time.time()

# get all the necessary info for the main loop
demand, loc, comp, ac_prof, ac, dist = read_data()

# get the hourly demand 
h_demand = h_demands(demand,loc)


# set up the while loop and the final list to which the answer is stored
final_paths = []
profitable = True

# main loop: it runs until we run out of planes or no remaining plane is profitable
while profitable:
    
    # gets total demand for each hour (+1,0,-1,2)
    tot_h_dem = tot_h_dems(h_demand)
    
Example #15
import numpy as np
import tensorflow as tf
from functions import read_data, neural_net_model

X_train, y_train, X_test, y_test, df_test, df_train = read_data()

xs = tf.placeholder("float")
xd = tf.placeholder("float")
ys = tf.placeholder("float")

output = neural_net_model(xs, 3)
cost = tf.reduce_mean(tf.square(output - ys))
# our mean squared error cost function

train = tf.train.GradientDescentOptimizer(0.001).minimize(cost)

c_t = []
saver = tf.train.Saver()
# Gradient descent
with tf.Session() as sess:
    # Initiate session and initialize all variables
    sess.run(tf.global_variables_initializer())
    for i in range(50):
        for j in range(X_train.shape[0]):
            sess.run(train,
                     feed_dict={
                         xs: X_train[j, :].reshape(1, 3),
                         ys: y_train[j]
                     })
            # Run train with each sample
        c_t.append(sess.run(cost, feed_dict={xs: X_train, ys: y_train}))
Example #16
from keras.preprocessing import sequence
from keras.layers import Dropout
from keras.models import model_from_json
from keras.models import load_model
from nltk.tokenize import RegexpTokenizer

import functions

path = '/home/mark/Research'
data_dir = path + '/data'

train = True
load_all = True

weight_matrix, word_index = functions.load_embeddings(data_dir +
                                                      '/glove.6B.100d.txt')

data = functions.read_data(data_dir)
train, test, val = functions.split_data(data, .8, data_dir)

train = train.reset_index()
test = test.reset_index()
val = val.reset_index()

#max_length, avg_words, seq_length = functions.maxLen(data)
train_x = functions.pipeline(train, word_index, weight_matrix)
test_x = functions.pipeline(test, word_index, weight_matrix)
val_x = functions.pipeline(val, word_index, weight_matrix)

train_y = functions.labels(train)
test_y = functions.labels(test)
val_y = functions.labels(val)
Example #17
def run_task(task_input):

    inputParam, username, current_run = task_input
    user_folder = "uploads/" + username
    run_folder = os.path.join(user_folder, str(current_run))

    try:
        # Reading of Dataset and Metadata
        pathData = os.path.abspath("uploads/dataset/" +
                                   inputParam['data_file'])
        dataset = read_data(pathData)
        # pathMeta = cache(url = os.path.abspath("uploads/metadata/" + inputParam["meta_file"]))
        # metadata = read_meta(pathMeta)

        # Using Cornac
        eval_method = select_eval(inputParam["evalmethod"], dataset)
        model = select_model(inputParam)
        metrics = select_metrics(inputParam["metrics"], inputParam)

        exp = cornac.Experiment(eval_method=eval_method,
                                models=[model],
                                metrics=metrics,
                                user_based=True)
        exp.run()
        exp_result = str(exp.result)
        result = exp_result.split("\n")

        # Splitting the output
        output = []
        for line in result:
            if '|' in line:
                store = []
                for data in line.split('|'):
                    store.append(data)
                output.append(store)

        # Creating the run folder
        os.makedirs(run_folder)

        # Saving the trained model
        model_file = "trained_model.pkl"
        model_path = os.path.join(run_folder, model_file)
        f = open(model_path, "wb")
        pickle.dump(model, f)
        f.close()

        # Saving the run results
        results_path = user_folder + "/user_results.pkl"
        run_result = {}
        run_result["parameter"] = inputParam
        run_result["output"] = output
        result_dict = {current_run: run_result}

        f = open(results_path, "rb")
        user_results = pickle.load(f)
        user_results.update(result_dict)
        f.close()

        f = open(results_path, "wb")
        pickle.dump(user_results, f)
        f.close()

        print("Task completed!")

    except:
        results_path = user_folder + "/user_results.pkl"
        result_dict = {current_run: "Training error! Try again..."}
        f = open(results_path, "rb")
        user_results = pickle.load(f)
        user_results.update(result_dict)
        f.close()

        f = open(results_path, "wb")
        pickle.dump(user_results, f)
        f.close()

        print("Training Error!")
Example #18
        def reset_data():
            fn.read_data()
            canvas.get_tk_widget().destroy()

            self.queryText.delete('1.0', END)
            self.queryText.insert(END, data)
Example #19
nxh = int(nx / 2)
circular_mask = np.zeros([nz, nxh + 1, ny])
kx_plus = np.fft.rfftfreq(nx) * nx / LMFx / Re_tau
kz_plus = np.fft.fftfreq(nz) * nz / LMFz / Re_tau
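# Keep only the wavenumber modes that lie inside the k_plus_cutoff radius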
for j in range(nz):
    for i in range(nxh + 1):
        if k_plus_cutoff ** 2 - (kx_plus[i] ** 2 + kz_plus[j] ** 2) > 0:
            circular_mask[j, i] = 1.0

# NOTE: path might change based on where you have the data file
# either change to the absolute path of where its stored or create an appropriate symlink
path = "../200-128/"

#  Read velocity/stress data from files
read_start = time.perf_counter()
data = functions.read_data(path, nx, ny, nz, truemeans, nu, circular_mask)
read_end = time.perf_counter()
print("read time: ", read_end - read_start)

# Create a constant scaled version of the raw data in realspace
scale_start = time.perf_counter()
scaled = functions.scale_data(data)
scale_end = time.perf_counter()
print("scale time: ", scale_end - scale_start)

# Save the scaled data to a npy file for quick learning on it
save_start = time.perf_counter()
vels = ["u", "v", "w"]
grads = ["dudx", "dudy", "dudz", "dvdx", "dvdy", "dvdz", "dwdx", "dwdy", "dwdz"]
taus = ["uu", "vv", "ww", "uv", "uw", "vw"]
fields = vels + grads + taus
Example #20
    DATA_DIR = "\\\\neptune\TradingRoom\RESEARCH\OlenaG\PortfolioPicking\BandSize3\\"     
    BandSize = "Bs3"
    
    key_HD = ["1HD","2HD","3HD","4HD","5HD"]        
    key_Stat = ["Relative RPT (BPS)","Relative Signed Win Rate (%)","Number Trades","Stock Return (BPS)",
    "Max Drawdown (%)","Relative Sharpe","Relative KRatio"]
    key_Window = ["2 Year","1 Year","6 Month","3 Month","2 Month","1 Month"]
    
    F2Scores = ["F2(8)","F2(16)"]
    decision_date = datetime.datetime(2012,8,29).date() #datetime.datetime(2012,8,29).date()
    date_start = decision_date
    date_end = datetime.datetime(2013,9,18).date() #datetime.datetime(2013,9,18).date()

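    # Loop over decision dates and load the per-date ExtraStats CSVs for each F2 score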
    while decision_date <= date_end :
        sDate = decision_date.strftime("%Y%m%d")
    
        DATA = {}
    
        data_for_date_not_found = False
        for fscore in F2Scores:
            fn = DATA_DIR+"ExtraStats_"+fscore+"_Band_MultiHold_"+sDate+".csv"
            if os.access(fn,os.R_OK):
                functions.read_data(fn,DATA,fscore,key_HD,key_Stat,key_Window)
            else:
                data_for_date_not_found = True
       
        if not data_for_date_not_found:    
            stock_names = sorted(DATA.keys())
            #functions.debug_output(DATA,key_HD,key_Stat,key_Window,F2Scores,date=decision_date,fname=MHD)
        
  
Example #21
"""
Created on Fri Jan  6 15:42:58 2017

@author: harti and valsecchi
"""
from functions import read_data, cropped, createIm, normalization, saveIm, binning, oscillation

path_ob = 'data/data_OB'
path_im = 'data/data_smp'
path_dc = 'data/DCs'

bin_fac = None  # no binning: 1 or None; for 2x2 binning set bin_fac = 2
norm_param = [3, 5, 20, 40]
crop_param = [10, 15, 80, 60]
oscillationParam = [30, 1, 1, 1]
numberPeriods = 1
savingFolder = 'folder'

im, ob = read_data(path_im, path_ob, path_dc)
#im,ob=normalization(im,ob,*norm_param)
oscillation(im, ob, folder=savingFolder, *oscillationParam)
#im,ob = cropped(im,ob,*crop_param)
#im, ob = binning(im,ob,bin_fac)
ti, dpci, dfi, vis_map = createIm(im, ob, numberPeriods)
saveIm(ti,
       dpci,
       dfi,
       vis_map,
       name='period2',
       folder=savingFolder,
       overWrite=True)
Example #22
    # This outputs a 3-rank tensor of the same shape.
    loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                        predictions=y_pred_slice)

    # Keras may reduce this across the first axis (the batch)
    # but the semantics are unclear, so to be sure we use
    # the loss across the entire tensor, we reduce it to a
    # single scalar with the mean function.
    loss_mean = tf.reduce_mean(loss)

    return loss_mean


##data setup

df = read_data(nrows=20000)
print(df.head(9))
df["Widerstand"] = df["Spannung"] / df["Strom I"]
df["Abstand"] = abs(df["Abstand"] - 4)
#filter df
target_var = ["Abstand"]
used_var = ["Zeit", "Abstand", "Widerstand"]
target_names = target_var
df = df[used_var]
#steps to predict
shift_steps = 10000
df_target = df[target_var].shift(-shift_steps)

##neural network data setup
#data
x_data = df.values[0:-shift_steps]
Example #23
from transformers import BertModel, BertTokenizer, AdamW
import torch.nn.functional as F
import torch.nn as nn
import torch
import nnmodel
import random
from sklearn.model_selection import train_test_split

import importlib
importlib.reload(nnmodel)
import functions as myfun

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Read the data
trainset = myfun.read_data('./train.tsv')
trainset, valset = train_test_split(trainset, test_size=0.3, random_state=7)

testset = myfun.read_data('./test.tsv')


########################################################################################################################
#                                                              1.
#                                  Multi-class text classification model with an RNN architecture
#                                        one RNN layer + 3 fully connected layers
#                                           glove.6B.50d word-vector embeddings
########################################################################################################################
# Define the network
class mynetwork(nn.Module):
    def __init__(self):
        super().__init__()
Example #24
################ Print Chat Connection Info ################
RESPONSE = ""
while RESPONSE.find(f"JOIN {CHANNEL}") < 0:
    RESPONSE = IRC.get_response().rstrip()
    print(RESPONSE + "\n")
################ Print Chat Connection Info ################

################ WORKER INITIALIZATION ################
MESSAGE_CACHE = []  # Initialize the autotracking message queue
CLEANER = workers.MessageQueueCleaner(RESPONSE_TIME, MESSAGE_CACHE)
TALKER_DICT = {}  # Initialize talker dictionary
TRACKER = workers.UserTracker(RESPONSE_TIME, MESSAGE_CACHE, TALKER_DICT)
################ WORKER INITIALIZATION ################

BANNED_WORDS = func.read_data("BANNED_WORDS")

################ MAIN PROCESS EXECUTION ################
while True:
    RESPONSE = IRC.get_response().rstrip()
    print(RESPONSE)

    # Check for swearing
    if func.test_match(BANNED_WORDS, RESPONSE):
        BAD_USER = re.findall(r'(?<=\:)(.*?)(?=\!)',
                              RESPONSE)  #extract username
        if len(BAD_USER) > 0:
            IRC.send(CHANNEL, f"Clean it up @{BAD_USER[0]}!")

    # Check for spam
    if func.test_max(TALKER_DICT, MSG_PER_MINUTE):
Example #25
# This script only plots the results of the Fourier transform

import matplotlib.pyplot as plt
from functions.read_data import *
from functions.fourier_transform import *
from functions.save_plot import *
# read the data

df = read_data(dataset="000", nrows=300000)
# perform the Fourier transform
abstand_transformiert, freq = transform(df, sample_time=6, column="Abstand")
# clip the data to a limited value range for plotting
zens = 0.5
# plot
freq_zensiert = freq[(freq < zens)]
print(freq_zensiert[:5])
abstand_zensiert = abstand_transformiert[:len(freq_zensiert)]

plt.plot(freq_zensiert, abs(abstand_zensiert))

plt.xlim(0, zens)
plt.ylim(0.0, 0.25)
plt.xlabel(r"\textbf{Frequenz [1/s]}")
plt.ylabel(r"\textbf{Spektrum [mm/s]}")

# df = read_data(dataset="051", nrows = 300000)
#
# abstand_transformiert, freq = transform(df, sample_time=6, column="Abstand.2")
#
# freq_zensiert = freq[(freq < zens)]
# abstand_zensiert = abstand_transformiert[:len(freq_zensiert)]
Example #26
def main():
    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()

    ap.add_argument("-d",
                    "--data_dir",
                    required=True,
                    help="Path to the images directory")
    ap.add_argument("-i",
                    "--input",
                    type=int,
                    required=True,
                    default=299,
                    help="The input size")
    ap.add_argument("-t",
                    "--tensorboard_path",
                    required=True,
                    help="Path to the model file")
    ap.add_argument("-c",
                    "--checkpoint_dir",
                    required=True,
                    help="Path to the model weights checkpoints")
    ap.add_argument("-s",
                    "--save_dir",
                    required=True,
                    help="Path to the model file")

    args = vars(ap.parse_args())
    size = args["input"]

    # data
    print("Loading data...")
    filenames, labels, num_classes = read_data(args["data_dir"])
    labels = tf.keras.utils.to_categorical(labels)

    train_set = DataGenerator(x_set=filenames,
                              y_set=labels,
                              batch_size=BATCH_SIZE,
                              target_size=(size, size))
    steps_per_epoch = len(filenames) // BATCH_SIZE

    # ml
    print("Designing model...")
    base_model = tf.keras.applications.inception_v3.InceptionV3(
        include_top=False, input_shape=(size, size, 3))
    base_model.trainable = False  # freeze layers

    model = tf.keras.Sequential([
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(units=1000, activation="relu"),
        tf.keras.layers.Dense(units=num_classes, activation="softmax")
    ])

    # checkpoints
    print("Loading weights...")
    if os.path.exists(args["checkpoint_dir"]):
        try:
            model.load_weights(args["checkpoint_dir"])
        except Exception as e:
            print("Oups!\nSomething turns wrong..\nMaybe Weights mismatch...",
                  e)
    else:
        print("Weights not found")

    # cost function & optimization method
    model.compile(loss="categorical_crossentropy",
                  optimizer="rmsprop",
                  metrics=["accuracy"])

    # callbacks
    callback_checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath=args["checkpoint_dir"], verbose=0, save_weights_only=True)
    callback_tensorboard = tf.keras.callbacks.TensorBoard(
        log_dir=args["tensorboard_path"], write_images=True)
    callbacks = [callback_checkpoint, callback_tensorboard]

    # training
    print("Start training...")
    history = model.fit(train_set,
                        epochs=EPOCHS,
                        steps_per_epoch=steps_per_epoch,
                        callbacks=callbacks)

    # save
    print("Saving model...")
    now = time.localtime()
    model.save(
        os.path.join(args["save_dir"],
                     f"model-{now.tm_mday}-{now.tm_mon}-{now.tm_year}.h5"))
Example #27
my_parser.add_argument('Part',
                       metavar='part',
                       type=str,
                       help='is it part1 or part2 ?')

# Execute the parse_args() method
args = my_parser.parse_args()

part = args.Part

if __name__ == '__main__':

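    # Try the input path relative to the current working directory first, then fall back to the raw argument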
    try:
        input_path = str(os.getcwd() + str(args.Path))
        ds = functions.read_data(input_path)
    except:
        input_path = str(args.Path)
        ds = functions.read_data(input_path)

    if part == 'part1':

        saveds1 = functions.sum_2_2020(ds)

        with open('saveds1.txt', 'w') as file:
            file.writelines("%s\n" % place for place in saveds1)

    else:

        saveds2 = functions.sum_3_2020(ds)
Example #28
        sensors_to_use = all_sensors[:18]
    elif args.sensors == 'foot':
        sensors_to_use = all_sensors[18:]
    terrain_noise = args.terrain_noise
    signal_noise_std = args.signal_noise
    timesteps = args.timesteps
    data_split = args.data_split
    n_samples = args.n_samples

    destination_name = 'amter_'+noise_params[terrain_noise][0]+'0'+str(signal_noise_std)[2:]+'_'+str(timesteps)+\
                       '_'+args.sensors+'_'+terrains_flag+'_'+str(n_samples)
    destination = '../cache/datasets/amter/'+destination_name+'.ds'

    ''' Reading .txt files to a dict called data '''
    print '\n\n ## Reading .txt data files...'
    data = read_data(noises=[terrain_noise], terrains=terrains_to_use, sensors=sensors_to_use, n_samples=n_samples)

    ''' Cutting samples, normalizing and adding a signal noise '''
    print '\n\n ## Cutting samples, normalizing and adding a signal noise...'
    samples = dict()
    for terrain in terrains_to_use:
        samples[terrain] = [[] for i in range(len(data[terrain_noise][terrain][sensors_to_use[0]]))]
        for sensor in sensors_to_use:
            for i_sample, sample_terrain in enumerate(data[terrain_noise][terrain][sensor]):
                samples[terrain][i_sample] += prepare_signal(signal=sample_terrain[10:timesteps+10], sen=sensor)
        print 'Samples of', terrain, 'cut, normalized and noised.'

    ''' Splitting data '''
    print '\n\n ## Splitting data...'
    x = {'training': list(), 'validation': list(), 'testing': list()}
    y = {'training': list(), 'validation': list(), 'testing': list()}
Example #29
                    dest='dropout',
                    action='store',
                    type=int,
                    default=0.05)
# parser.add_argument('--lr' , dest='lr' , action ='store' , type = int, default= 0.001)
parser.add_argument('--save_directory',
                    dest='save_directory',
                    action='store',
                    default='./checkpoint.pth')

parser = parser.parse_args()
epochs = parser.epochs
lr = parser.lr
structure = parser.structure
dropout = parser.dropout
hidden_layer1 = parser.hidden_layer1
hidden_layer2 = parser.hidden_layer2
power = parser.gpu

train_loaders, valid_loaders, test_loaders = functions.read_data(data_dir)
## load the model
model, optimizer, criterion = functions.model_setup(structure, epochs, dropout,
                                                    hidden_layer1,
                                                    hidden_layer2, lr, power)
## train the model
functions.train(model, epochs, criterion, optimizer, train_loaders,
                valid_loaders, power)
## save the model
functions.save_checkpoint(path, hidden_layer1, hidden_layer2, dropout, lr,
                          epochs)
Example #30
def update_avg_points_graph(prepost_or_year, league, year):
    if prepost_or_year == "year":
        raise PreventUpdate

    df_pre, df_post = read_data(prepost_or_year, league, year)
    df_pre = preprocess_avg_points(df_pre)
    df_post = preprocess_avg_points(df_post)

    # Fix for weird cases where matches from earlier match days were played in post corona time
    df_post = df_post[~df_post["yearMatchday"].isin(df_pre["yearMatchday"].unique())]

    points_df_pre = pd.DataFrame(
        0,
        index=df_pre["yearMatchday"].unique(),
        columns=["homeTeamPoints", "awayTeamPoints", "numberOfMatches"],
    )
    points_df_post = pd.DataFrame(
        0,
        index=df_post["yearMatchday"].unique(),
        columns=["homeTeamPoints", "awayTeamPoints", "numberOfMatches"],
    )

    points_df_pre = fill_points_df(df_pre, points_df_pre)
    points_df_post = fill_points_df(df_post, points_df_post)

    trace1_1 = go.Scatter(
        x=points_df_pre.index,
        y=points_df_pre["homeAvgPoints"],
        mode="lines",
        name="Average home team points before corona",
        line=dict(color="mediumseagreen"),
    )
    trace1_2 = go.Scatter(
        x=points_df_pre.index,
        y=points_df_pre["awayAvgPoints"],
        mode="lines",
        name="Average away team points before corona",
        line=dict(color="indianred"),
    )
    trace1_3 = go.Scatter(
        x=points_df_post.index,
        y=points_df_post["homeAvgPoints"],
        mode="lines",
        name="Average home team points after corona",
        line=dict(color="seagreen"),
    )
    trace1_4 = go.Scatter(
        x=points_df_post.index,
        y=points_df_post["awayAvgPoints"],
        mode="lines",
        name="Average away team points after corona",
        line=dict(color="firebrick"),
    )

    trace2_1 = go.Scatter(
        x=points_df_pre.index,
        y=points_df_pre["maHomePoints"],
        mode="lines",
        name="Average home team points before corona",
        line=dict(color="mediumseagreen"),
        visible=False,
    )
    trace2_2 = go.Scatter(
        x=points_df_pre.index,
        y=points_df_pre["maAwayPoints"],
        mode="lines",
        name="Average away team points before corona",
        line=dict(color="indianred"),
        visible=False,
    )
    trace2_3 = go.Scatter(
        x=points_df_post.index,
        y=points_df_post["maHomePoints"],
        mode="lines",
        name="Average home team points after corona",
        line=dict(color="seagreen"),
        visible=False,
    )
    trace2_4 = go.Scatter(
        x=points_df_post.index,
        y=points_df_post["maAwayPoints"],
        mode="lines",
        name="Average away team points after corona",
        line=dict(color="firebrick"),
        visible=False,
    )

    data = [
        trace1_1,
        trace1_2,
        trace1_3,
        trace1_4,
        trace2_1,
        trace2_2,
        trace2_3,
        trace2_4,
    ]

    updatemenus = list(
        [
            dict(
                active=0,
                showactive=True,
                x=0.57,
                y=1.2,
                buttons=list(
                    [
                        dict(
                            label="Average per matchday",
                            method="update",
                            args=[
                                {
                                    "visible": [
                                        True,
                                        True,
                                        True,
                                        True,
                                        False,
                                        False,
                                        False,
                                        False,
                                    ]
                                }
                            ],
                        ),
                        dict(
                            label="Rolling average over 3 matchdays",
                            method="update",
                            args=[
                                {
                                    "visible": [
                                        False,
                                        False,
                                        False,
                                        False,
                                        True,
                                        True,
                                        True,
                                        True,
                                    ]
                                }
                            ],
                        ),
                    ]
                ),
            )
        ]
    )

    layout = dict(
        showlegend=True,
        xaxis=dict(title="Year and matchday"),
        yaxis=dict(title="Points"),
        updatemenus=updatemenus,
    )

    fig = dict(data=data, layout=layout)

    text = f"## Average points for home and away teams in the {league}"

    return fig, text
Example #31
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from functions import read_data, neural_net_model, denormalize

_, _, X_test, y_test, df_test, _ = read_data()

xs = tf.placeholder("float")
output = neural_net_model(xs, 3)
saver = tf.train.Saver()
with tf.Session() as session:
    #Load NN
    saver.restore(session, './NN_ex.ckpt')
    # Evaluate test data
    pred = session.run(output, feed_dict={xs: X_test})
    y_test = denormalize(df_test, y_test)
    pred = denormalize(df_test, pred)
    #Plot results
    plt.plot(range(y_test.shape[0]), y_test, label="Original Data")
    plt.plot(range(y_test.shape[0]), pred, label="Predicted Data")
    plt.legend(loc='best')
    plt.ylabel('Stock Value')
    plt.xlabel('Days')
    plt.title('Stock Market Nifty')
    plt.show()
Example #32
from tensorflow import keras
import numpy as np
import random
from matplotlib import pyplot as plt
from keras.callbacks import History, TensorBoard, ReduceLROnPlateau
from keras.models import Sequential
from keras.layers import Dense, Flatten, LSTM, Dropout
from keras.layers import Conv1D, GlobalAveragePooling1D, MaxPooling1D
from keras import metrics
import functions
import csv
import sys

arg = sys.argv[1]
X, Y = functions.read_data(arg)
file_name = arg + "_CNN"
epochs = functions.epochs

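# Reshape to (samples, timesteps, 1) for the Conv1D layers and split into train/test at index 2000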
X, Y = np.array(X), np.array(Y)
X = np.reshape(X, (X.shape[0], X.shape[1], 1))
Y = np.reshape(Y, (X.shape[0], 1))
X_train = X[0:2000, :, :]
Y_train = Y[0:2000]
X_test = X[2000:, :, :]
Y_test = Y[2000:]   


model = Sequential()
model.add(Conv1D(64, 3, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Conv1D(64, 3, activation='relu'))
model.add(MaxPooling1D(3))
Example #33
    print '\n ## Loaded mnist pruned nets ('+str(n_obs)+' obs) ## --------------------'
    print '@ Pruning steps:\t', max_len
    print '@ Mean n synapses:\t', kitt_mnist['n_syn_mean'][-1]
    print '@ Mean structure:\t', kitt_mnist['structure_mean'][-1]
    print '@ Mean accuracy:\t', kitt_mnist['acc_mean'][-1]
    print '----------------------------------------------------'

    return kitt_mnist


if __name__ == '__main__':
    sensors_ranges, terrain_types, all_sensors = load_params('sensors_ranges', 'terrain_types', 'sensors')
    # loading examples
    terrains = [terrain_types[str(i)] for i in [6, 8, 15]]
    data = read_data(noises=['no_noise'], terrains=terrains, sensors=all_sensors, n_samples=10)
    samples = dict()
    for terrain in terrains:
        samples[terrain] = [[] for i in range(len(data['no_noise'][terrain][all_sensors[0]]))]
        for sensor in all_sensors:
            for i_sample, sample_terrain in enumerate(data['no_noise'][terrain][sensor]):
                samples[terrain][i_sample] += prepare_signal(signal=sample_terrain[10:40 + 10], sen=sensor)

    nets = load_amter(na='nn')
    #nets = load_mnist()

    net = nets['net'][-1]
    structure = net[0]
    features = range(1, structure[0] + 1)
    classes = range(1, structure[2] + 1)
    syn_exist = net[4]