Example #1
import io

import matplotlib.pyplot as plt
from flask import Response
from matplotlib.backends.backend_agg import FigureCanvasAgg

# Create the figure once at module level; matplotlib keeps figures alive,
# so recreating them on every request would leak memory.
# params(), run_model() and plot() are project-local helpers.
fig, ax = plt.subplots()


def plot_png(path):
    plt.cla()  # clear the shared axes instead of creating new ones per request

    N, t_min, t_max, R_0, I_0, R0, D, y_max, t0_date = params()

    t, S, I, R = run_model(R0, D, N, I_0, R_0, t_min, t_max)
    plot(ax, t, S, I, R, t0_date, y_max)

    output = io.BytesIO()
    FigureCanvasAgg(fig).print_png(output)
    return Response(output.getvalue(), mimetype='image/png')
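
The source never shows how plot_png is served. A minimal sketch of the route wiring, assuming a standard Flask app (the app object and URL rule are assumptions, not from the source):

from flask import Flask

app = Flask(__name__)  # hypothetical application object

# Hypothetical route registration; the original snippet does not show it.
@app.route('/plot/<path>')
def serve_plot(path):
    return plot_png(path)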
Example #2
import os
import time

from keras.utils import plot_model

import model  # project-local; load_trdev_datasets() is project-local as well


def train_model(args, start_time):
    m = model.assemble(args.drpo_rate, args.enable_bn)
    plot_model(m, to_file='model.png')

    XY_train, XY_dev = load_trdev_datasets()

    start = time.time()

    out_fd = os.path.join(args.output_dir, 'model_{}_epch{}_bs{}_trn{}_val{}'.format(
        start_time, args.num_epochs, args.batch_size,
        len(XY_train[0]), len(XY_dev[0])))
    os.makedirs(out_fd)

    es_fp = os.path.join(out_fd, 'model-es.h5') if args.save_model else None

    model.train(m, XY_train, XY_dev, args.num_epochs, args.batch_size, es_fp)
    end = time.time()
    print("Training model took {} seconds.".format(end - start))

    # Save outputs
    print('Processing: ' + out_fd)

    model.write_summary(m, out_fd)
    if args.save_model:
        model.save_final(m, out_fd)
    model.plot(out_fd)

    # Predict a few samples
    train_idx = [0, 60, 100, 400]
    for i in train_idx:
        model.predict_imgs(m, (XY_train[0][i:i + 1], XY_train[1][i:i + 1]),
                           out_fd, 'train' + str(i))

    dev_idx = [0, 10, 20, 40]
    for i in dev_idx:
        model.predict_imgs(m, (XY_dev[0][i:i + 1], XY_dev[1][i:i + 1]), out_fd,
                           'dev' + str(i))

    print('  Complete.')
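
The filename 'model-es.h5' suggests es_fp is an early-stopping checkpoint path. A sketch of how model.train might wire it up with standard Keras callbacks (the function body is an assumption; only EarlyStopping and ModelCheckpoint are real Keras APIs):

from keras.callbacks import EarlyStopping, ModelCheckpoint

def train_sketch(m, XY_train, XY_dev, num_epochs, batch_size, es_fp=None):
    # Stop when validation loss stalls; optionally checkpoint the best weights.
    callbacks = [EarlyStopping(monitor='val_loss', patience=5)]
    if es_fp is not None:
        callbacks.append(ModelCheckpoint(es_fp, monitor='val_loss',
                                         save_best_only=True))
    m.fit(XY_train[0], XY_train[1],
          validation_data=(XY_dev[0], XY_dev[1]),
          epochs=num_epochs, batch_size=batch_size,
          callbacks=callbacks)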
Example #3
import sys
import time
from pathlib import Path

from sklearn.model_selection import KFold, StratifiedKFold

import data_reader
import dc
import model
import utils
# ClassifierMap and parse_args come from a project-local module as well.


def main():
    args = parse_args()

    if args.method not in ClassifierMap:
        print("Method", args.method, "not available.")
        sys.exit(1)

    print("Generate the data set with: ", args.s.split(","))
    filename = data_reader.generate_data(Path(args.data), args.s.split(","))
    print(filename)
    if args.method == "mnb" or args.method == "bnb":
        print("Bayes cleaner")
        cleaner = dc.bayes if args.type == "detection" else dc.bayes_family
    else:
        print("Linear Classifier cleaner")
        cleaner = dc.linear if args.type == "detection" else dc.linear_family
    X, y = cleaner(filename)

    classifier = ClassifierMap[args.method][0]
    print("Train and evaluation of the model...")
    print(ClassifierMap[args.method][1] + " " + args.s)
    # START: measure execution time
    start = time.time()
    kfold = KFold(n_splits=10, shuffle=True, random_state=1)
    s_kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)
    accuracy, precision, recall, fscore, auc = model.validation(
        X, y, classifier, kfold, args.type)
    end = time.time()
    print("Accuracy: " + repr(accuracy))
    print("Precision: " + repr(precision))
    print("Recall: " + repr(recall))
    print("F1 score: " + repr(fscore))
    print("AUC Score: " + repr(auc))
    print("Execution Time: " + utils.timer(start, end))
    print("")
    print("Plotting learning curve...")
    model.plot(X, y, classifier, s_kfold)
    print("Done.")
Example #4
import model
if __name__ == "__main__":
    # execute only if run as a script
    inputs = model.calculate(950715, run_plot=True)
    model.plot(**inputs)
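
model.plot(**inputs) only works if calculate returns a mapping whose keys match plot's parameters. A sketch of that contract (everything except the two function names is an assumption):

# Hypothetical sketch of the calculate()/plot() contract.
def calculate(seed, run_plot=False):
    # ... derive whatever series the model produces ...
    return {"t": [0, 1, 2], "values": [1.0, 2.0, 4.0]}  # keys match plot()'s signature

def plot(t, values):
    ...  # render the series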
Example #5
                                             freezeWeights_list):

            ## LOAD MODEL
            if True:  # build a fresh network; set to False to resume from a checkpoint
                net = ContextNet(K, N, Kc, Nc, Nhid, savesuffix)
            else:
                # Restore a previously saved network/optimizer state.
                net.load_state_dict(state['state_dict'])
                optimizer.load_state_dict(state['optimizer'])

            ## RUN
            Tactual_cont = M.generateTrainDat(net)
            net, lossall = M.train(net, nepochs, Tactual_cont, lr, ntrain,
                                   freezeWeights)
            Tdat_cont = M.evaluate(net, ntest, Tactual_cont)

            M.plot(net, Tactual_cont, Tdat_cont, lossall)
            print(net.savedir)

            # === save model (optional)
            # M.save(net)

            # save run state for later restoring
            state = {
                "Kc": Kc,
                "Nc": Nc,
                "Nhid": Nhid,
                "Tactual_cont": Tactual_cont,
                "Tdat_cont": Tdat_cont,
                "K": K,
                "N": N,
Example #6
import pickle

import matplotlib.pyplot as plt
import pandas as pd
from keras import backend as K
from keras import initializers
from keras.engine.topology import Layer

import model

plt.switch_backend('agg')  # render off-screen; no display required


texts = []
labels = []

df = pd.read_csv('../dataset/dataset.csv')
df = df.dropna()
df = df.reset_index(drop=True)
print("Information on the dataset")
print('Shape of dataset ', df.shape)
print(df.columns)
print('No. of unique news types: ', len(set(df['Type'])))
print(df.head())


texts, labels, sorted_type, indexed_type = model.df_to_list(df, texts, labels)
pickle.dump(indexed_type, open('indexed_type.sav', 'wb'))
word_index, embedding_matrix, data, labels, sequences = model.tokenize(texts, labels)
# Bind the trained network to a new name; assigning it to `model` would shadow
# the module and break the save_model/plot calls below.
net, history = model.model(word_index, embedding_matrix, sorted_type, data, labels)
model.save_model(net)
model.plot(history)
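
model.tokenize is project-local; it likely wraps the standard Keras text pipeline. A sketch under that assumption (the vocabulary cap and sequence length are invented, and the real function also builds embedding_matrix from pretrained vectors, which is omitted here):

import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences

MAX_WORDS = 20000  # assumed vocabulary cap
MAX_LEN = 200      # assumed padded sequence length

def tokenize_sketch(texts, labels):
    tokenizer = Tokenizer(num_words=MAX_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)
    data = pad_sequences(sequences, maxlen=MAX_LEN)
    return tokenizer.word_index, data, np.asarray(labels), sequences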
Example #7
import random

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

import model

np.random.seed(20180718)
random.seed(20180718)

iris = datasets.load_iris()

data = iris["data"]
label = iris["target"]
num_label = len(np.unique(label))

[train_data, test_data, train_label, test_label] = train_test_split(data, label, test_size=0.2, shuffle=True)

train_data = np.stack(train_data)
test_data = np.stack(test_data)
train_label = np.stack(train_label)
test_label = np.stack(test_label)

layers_dim = [4,4,4]

# Bind the network to `net`; assigning it to `model` would shadow the module.
net = model.vanilla_nural_network(layers_dim)
net.train(train_data, train_label,
          iteration=200000,
          learning_rate=0.001,
          lambd=0,
          keep_prob=1,
          interrupt_threshold=0.1,
          print_loss=True)
net.plot()
prob = net.predict(test_data)
predicted_label = np.argmax(prob, axis=1)
print("Test Accuracy: %.f%%" % (np.mean(test_label == predicted_label) * 100))
Example #8
import pandas as pd
import preprocessing
import model
import datetime as dt
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib
from sklearn.preprocessing import MinMaxScaler
import sys

feature = sys.argv[1]

df = pd.read_csv("../Arute_case/ds_exercise_data.csv")
df = preprocessing.datetime_partitions(df)
df = preprocessing.add_holiday(df)
df = preprocessing.fillna_with_mean(df)

# CashIn
scaler = MinMaxScaler(feature_range=(0, 1))

prediction, actual = model.LSTM_model(df, str(feature), scaler, train_size=0.6, window_size=365)

result = pd.concat([pd.DataFrame(scaler.inverse_transform(prediction[:, 0].reshape(1, -1)).T),
                    pd.DataFrame(scaler.inverse_transform(actual.reshape(1, -1)).T)], axis=1)
result.columns = ['predicted', 'actual']

print(result)

model.plot(result['predicted'], result['actual'])
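
model.plot here is project-local; a minimal sketch of an equivalent predicted-vs-actual overlay (the function name, styling, and output path are assumptions, not the project's API):

import matplotlib.pyplot as plt

def plot_sketch(predicted, actual):
    # Overlay both series so forecast quality is visible at a glance.
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(actual.values, label='actual')
    ax.plot(predicted.values, label='predicted')
    ax.set_xlabel('time step')
    ax.set_ylabel('value')
    ax.legend()
    fig.savefig('prediction_vs_actual.png')  # hypothetical output path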